1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * net/tipc/crypto.c: TIPC crypto for key handling & packet en/decryption |
4 | * |
5 | * Copyright (c) 2019, Ericsson AB |
6 | * All rights reserved. |
7 | * |
8 | * Redistribution and use in source and binary forms, with or without |
9 | * modification, are permitted provided that the following conditions are met: |
10 | * |
11 | * 1. Redistributions of source code must retain the above copyright |
12 | * notice, this list of conditions and the following disclaimer. |
13 | * 2. Redistributions in binary form must reproduce the above copyright |
14 | * notice, this list of conditions and the following disclaimer in the |
15 | * documentation and/or other materials provided with the distribution. |
16 | * 3. Neither the names of the copyright holders nor the names of its |
17 | * contributors may be used to endorse or promote products derived from |
18 | * this software without specific prior written permission. |
19 | * |
20 | * Alternatively, this software may be distributed under the terms of the |
21 | * GNU General Public License ("GPL") version 2 as published by the Free |
22 | * Software Foundation. |
23 | * |
24 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
25 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
26 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
27 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
28 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
29 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
30 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
31 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
32 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
33 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
34 | * POSSIBILITY OF SUCH DAMAGE. |
35 | */ |
36 | |
37 | #include <crypto/aead.h> |
38 | #include <crypto/aes.h> |
39 | #include <crypto/rng.h> |
40 | #include "crypto.h" |
41 | #include "msg.h" |
42 | #include "bcast.h" |
43 | |
44 | #define TIPC_TX_GRACE_PERIOD msecs_to_jiffies(5000) /* 5s */ |
45 | #define TIPC_TX_LASTING_TIME msecs_to_jiffies(10000) /* 10s */ |
46 | #define TIPC_RX_ACTIVE_LIM msecs_to_jiffies(3000) /* 3s */ |
47 | #define TIPC_RX_PASSIVE_LIM msecs_to_jiffies(15000) /* 15s */ |
48 | |
49 | #define TIPC_MAX_TFMS_DEF 10 |
50 | #define TIPC_MAX_TFMS_LIM 1000 |
51 | |
52 | #define TIPC_REKEYING_INTV_DEF (60 * 24) /* default: 1 day */ |
53 | |
54 | /* |
55 | * TIPC Key ids |
56 | */ |
57 | enum { |
58 | KEY_MASTER = 0, |
59 | KEY_MIN = KEY_MASTER, |
60 | KEY_1 = 1, |
61 | KEY_2, |
62 | KEY_3, |
63 | KEY_MAX = KEY_3, |
64 | }; |
65 | |
66 | /* |
67 | * TIPC Crypto statistics |
68 | */ |
69 | enum { |
70 | STAT_OK, |
71 | STAT_NOK, |
72 | STAT_ASYNC, |
73 | STAT_ASYNC_OK, |
74 | STAT_ASYNC_NOK, |
75 | STAT_BADKEYS, /* tx only */ |
76 | STAT_BADMSGS = STAT_BADKEYS, /* rx only */ |
77 | STAT_NOKEYS, |
78 | STAT_SWITCHES, |
79 | |
80 | MAX_STATS, |
81 | }; |
82 | |
83 | /* TIPC crypto statistics' header */ |
static const char *hstats[MAX_STATS] = {"ok", "nok", "async", "async_ok",
					"async_nok", "badmsgs", "nokeys",
					"switches"};
87 | |
88 | /* Max TFMs number per key */ |
89 | int sysctl_tipc_max_tfms __read_mostly = TIPC_MAX_TFMS_DEF; |
90 | /* Key exchange switch, default: on */ |
91 | int sysctl_tipc_key_exchange_enabled __read_mostly = 1; |
92 | |
93 | /* |
94 | * struct tipc_key - TIPC keys' status indicator |
95 | * |
96 | * 7 6 5 4 3 2 1 0 |
97 | * +-----+-----+-----+-----+-----+-----+-----+-----+ |
98 | * key: | (reserved)|passive idx| active idx|pending idx| |
99 | * +-----+-----+-----+-----+-----+-----+-----+-----+ |
100 | */ |
101 | struct tipc_key { |
102 | #define KEY_BITS (2) |
103 | #define KEY_MASK ((1 << KEY_BITS) - 1) |
104 | union { |
105 | struct { |
106 | #if defined(__LITTLE_ENDIAN_BITFIELD) |
107 | u8 pending:2, |
108 | active:2, |
109 | passive:2, /* rx only */ |
110 | reserved:2; |
111 | #elif defined(__BIG_ENDIAN_BITFIELD) |
112 | u8 reserved:2, |
113 | passive:2, /* rx only */ |
114 | active:2, |
115 | pending:2; |
116 | #else |
117 | #error "Please fix <asm/byteorder.h>" |
118 | #endif |
119 | } __packed; |
120 | u8 keys; |
121 | }; |
122 | }; |
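
/* For example (illustrative values only), a state with a pending key in
 * slot 1, an active key in slot 3 and no passive key is packed as
 * keys = (0 << 4) | (3 << 2) | 1 = 0x0d; see tipc_crypto_key_set_state()
 * below for how the three indices are combined.
 */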
123 | |
124 | /** |
125 | * struct tipc_tfm - TIPC TFM structure to form a list of TFMs |
126 | * @tfm: cipher handle/key |
127 | * @list: linked list of TFMs |
128 | */ |
129 | struct tipc_tfm { |
130 | struct crypto_aead *tfm; |
131 | struct list_head list; |
132 | }; |
133 | |
/**
 * struct tipc_aead - TIPC AEAD key structure
 * @tfm_entry: per-cpu pointer to one entry in the TFM list
 * @crypto: TIPC crypto instance that owns this key
 * @cloned: reference to the source key in case of cloning
 * @users: the number of the key users (TX/RX)
 * @salt: the key's SALT value
 * @authsize: authentication tag size (max = 16)
 * @mode: crypto mode applied to the key
 * @hint: a readable hint built from the last bytes of the user key
 * @rcu: struct rcu_head
 * @key: the aead key
 * @gen: the key's generation
 * @seqno: the key seqno (cluster scope)
 * @refcnt: the key reference counter
 */
150 | struct tipc_aead { |
151 | #define TIPC_AEAD_HINT_LEN (5) |
152 | struct tipc_tfm * __percpu *tfm_entry; |
153 | struct tipc_crypto *crypto; |
154 | struct tipc_aead *cloned; |
155 | atomic_t users; |
156 | u32 salt; |
157 | u8 authsize; |
158 | u8 mode; |
159 | char hint[2 * TIPC_AEAD_HINT_LEN + 1]; |
160 | struct rcu_head rcu; |
161 | struct tipc_aead_key *key; |
162 | u16 gen; |
163 | |
164 | atomic64_t seqno ____cacheline_aligned; |
165 | refcount_t refcnt ____cacheline_aligned; |
166 | |
167 | } ____cacheline_aligned; |
168 | |
169 | /** |
170 | * struct tipc_crypto_stats - TIPC Crypto statistics |
171 | * @stat: array of crypto statistics |
172 | */ |
173 | struct tipc_crypto_stats { |
174 | unsigned int stat[MAX_STATS]; |
175 | }; |
176 | |
177 | /** |
178 | * struct tipc_crypto - TIPC TX/RX crypto structure |
179 | * @net: struct net |
180 | * @node: TIPC node (RX) |
181 | * @aead: array of pointers to AEAD keys for encryption/decryption |
182 | * @peer_rx_active: replicated peer RX active key index |
183 | * @key_gen: TX/RX key generation |
184 | * @key: the key states |
185 | * @skey_mode: session key's mode |
186 | * @skey: received session key |
187 | * @wq: common workqueue on TX crypto |
 * @work: delayed work scheduled for TX/RX
189 | * @key_distr: key distributing state |
190 | * @rekeying_intv: rekeying interval (in minutes) |
191 | * @stats: the crypto statistics |
192 | * @name: the crypto name |
193 | * @sndnxt: the per-peer sndnxt (TX) |
194 | * @timer1: general timer 1 (jiffies) |
195 | * @timer2: general timer 2 (jiffies) |
196 | * @working: the crypto is working or not |
197 | * @key_master: flag indicates if master key exists |
198 | * @legacy_user: flag indicates if a peer joins w/o master key (for bwd comp.) |
199 | * @nokey: no key indication |
200 | * @flags: combined flags field |
201 | * @lock: tipc_key lock |
202 | */ |
203 | struct tipc_crypto { |
204 | struct net *net; |
205 | struct tipc_node *node; |
206 | struct tipc_aead __rcu *aead[KEY_MAX + 1]; |
207 | atomic_t peer_rx_active; |
208 | u16 key_gen; |
209 | struct tipc_key key; |
210 | u8 skey_mode; |
211 | struct tipc_aead_key *skey; |
212 | struct workqueue_struct *wq; |
213 | struct delayed_work work; |
214 | #define KEY_DISTR_SCHED 1 |
215 | #define KEY_DISTR_COMPL 2 |
216 | atomic_t key_distr; |
217 | u32 rekeying_intv; |
218 | |
219 | struct tipc_crypto_stats __percpu *stats; |
220 | char name[48]; |
221 | |
222 | atomic64_t sndnxt ____cacheline_aligned; |
223 | unsigned long timer1; |
224 | unsigned long timer2; |
225 | union { |
226 | struct { |
227 | u8 working:1; |
228 | u8 key_master:1; |
229 | u8 legacy_user:1; |
230 | u8 nokey: 1; |
231 | }; |
232 | u8 flags; |
233 | }; |
234 | spinlock_t lock; /* crypto lock */ |
235 | |
236 | } ____cacheline_aligned; |
237 | |
238 | /* struct tipc_crypto_tx_ctx - TX context for callbacks */ |
239 | struct tipc_crypto_tx_ctx { |
240 | struct tipc_aead *aead; |
241 | struct tipc_bearer *bearer; |
242 | struct tipc_media_addr dst; |
243 | }; |
244 | |
245 | /* struct tipc_crypto_rx_ctx - RX context for callbacks */ |
246 | struct tipc_crypto_rx_ctx { |
247 | struct tipc_aead *aead; |
248 | struct tipc_bearer *bearer; |
249 | }; |
250 | |
251 | static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead); |
252 | static inline void tipc_aead_put(struct tipc_aead *aead); |
253 | static void tipc_aead_free(struct rcu_head *rp); |
254 | static int tipc_aead_users(struct tipc_aead __rcu *aead); |
255 | static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim); |
256 | static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim); |
257 | static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val); |
258 | static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead); |
259 | static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey, |
260 | u8 mode); |
261 | static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src); |
262 | static void *tipc_aead_mem_alloc(struct crypto_aead *tfm, |
263 | unsigned int crypto_ctx_size, |
264 | u8 **iv, struct aead_request **req, |
265 | struct scatterlist **sg, int nsg); |
266 | static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb, |
267 | struct tipc_bearer *b, |
268 | struct tipc_media_addr *dst, |
269 | struct tipc_node *__dnode); |
270 | static void tipc_aead_encrypt_done(void *data, int err); |
271 | static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead, |
272 | struct sk_buff *skb, struct tipc_bearer *b); |
273 | static void tipc_aead_decrypt_done(void *data, int err); |
274 | static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr); |
275 | static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead, |
276 | u8 tx_key, struct sk_buff *skb, |
277 | struct tipc_crypto *__rx); |
278 | static inline void tipc_crypto_key_set_state(struct tipc_crypto *c, |
279 | u8 new_passive, |
280 | u8 new_active, |
281 | u8 new_pending); |
282 | static int tipc_crypto_key_attach(struct tipc_crypto *c, |
283 | struct tipc_aead *aead, u8 pos, |
284 | bool master_key); |
285 | static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending); |
286 | static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx, |
287 | struct tipc_crypto *rx, |
288 | struct sk_buff *skb, |
289 | u8 tx_key); |
290 | static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb); |
291 | static int tipc_crypto_key_revoke(struct net *net, u8 tx_key); |
292 | static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb, |
293 | struct tipc_bearer *b, |
294 | struct tipc_media_addr *dst, |
295 | struct tipc_node *__dnode, u8 type); |
296 | static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead, |
297 | struct tipc_bearer *b, |
298 | struct sk_buff **skb, int err); |
299 | static void tipc_crypto_do_cmd(struct net *net, int cmd); |
300 | static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf); |
301 | static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new, |
302 | char *buf); |
303 | static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey, |
304 | u16 gen, u8 mode, u32 dnode); |
305 | static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr); |
306 | static void tipc_crypto_work_tx(struct work_struct *work); |
307 | static void tipc_crypto_work_rx(struct work_struct *work); |
308 | static int tipc_aead_key_generate(struct tipc_aead_key *skey); |
309 | |
310 | #define is_tx(crypto) (!(crypto)->node) |
311 | #define is_rx(crypto) (!is_tx(crypto)) |
312 | |
313 | #define key_next(cur) ((cur) % KEY_MAX + 1) |
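
/* e.g. key_next(1) == 2 and key_next(3) == 3 % 3 + 1 == 1, i.e. the rotation
 * cycles through slots 1..3 only and never returns the master slot (0).
 */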
314 | |
315 | #define tipc_aead_rcu_ptr(rcu_ptr, lock) \ |
316 | rcu_dereference_protected((rcu_ptr), lockdep_is_held(lock)) |
317 | |
318 | #define tipc_aead_rcu_replace(rcu_ptr, ptr, lock) \ |
319 | do { \ |
320 | struct tipc_aead *__tmp = rcu_dereference_protected((rcu_ptr), \ |
321 | lockdep_is_held(lock)); \ |
322 | rcu_assign_pointer((rcu_ptr), (ptr)); \ |
323 | tipc_aead_put(__tmp); \ |
324 | } while (0) |
325 | |
326 | #define tipc_crypto_key_detach(rcu_ptr, lock) \ |
327 | tipc_aead_rcu_replace((rcu_ptr), NULL, lock) |
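
/* Example usage (see tipc_crypto_key_flush()): under the key spinlock,
 *	tipc_crypto_key_detach(c->aead[k], &c->lock);
 * swaps slot k to NULL and puts the previous AEAD key, which is then freed
 * via RCU once its refcount drops to zero.
 */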
328 | |
/**
 * tipc_aead_key_validate - Validate an AEAD user key
 * @ukey: pointer to user key data
 * @info: netlink info pointer
 *
 * Return: 0 if the key is valid, otherwise < 0
 */
334 | int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info) |
335 | { |
336 | int keylen; |
337 | |
	/* Check if algorithm exists */
	if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) {
		GENL_SET_ERR_MSG(info, "unable to load the algorithm (module existed?)");
		return -ENODEV;
	}

	/* Currently, we only support the "gcm(aes)" cipher algorithm */
	if (strcmp(ukey->alg_name, "gcm(aes)")) {
		GENL_SET_ERR_MSG(info, "the algorithm is not supported yet");
		return -ENOTSUPP;
	}
349 | |
350 | /* Check if key size is correct */ |
351 | keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE; |
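	/* e.g. a 128-bit AES key plus the 4-octet salt gives ukey->keylen ==
	 * 16 + 4 == 20 octets; likewise 28/36 octets for AES-192/256
	 */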
352 | if (unlikely(keylen != TIPC_AES_GCM_KEY_SIZE_128 && |
353 | keylen != TIPC_AES_GCM_KEY_SIZE_192 && |
354 | keylen != TIPC_AES_GCM_KEY_SIZE_256)) { |
		GENL_SET_ERR_MSG(info, "incorrect key length (20, 28 or 36 octets?)");
356 | return -EKEYREJECTED; |
357 | } |
358 | |
359 | return 0; |
360 | } |
361 | |
362 | /** |
363 | * tipc_aead_key_generate - Generate new session key |
364 | * @skey: input/output key with new content |
365 | * |
366 | * Return: 0 in case of success, otherwise < 0 |
367 | */ |
368 | static int tipc_aead_key_generate(struct tipc_aead_key *skey) |
369 | { |
370 | int rc = 0; |
371 | |
372 | /* Fill the key's content with a random value via RNG cipher */ |
373 | rc = crypto_get_default_rng(); |
	if (likely(!rc)) {
		rc = crypto_rng_get_bytes(crypto_default_rng, skey->key,
					  skey->keylen);
		crypto_put_default_rng();
	}
379 | |
380 | return rc; |
381 | } |
382 | |
383 | static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead) |
384 | { |
385 | struct tipc_aead *tmp; |
386 | |
387 | rcu_read_lock(); |
388 | tmp = rcu_dereference(aead); |
389 | if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt))) |
390 | tmp = NULL; |
391 | rcu_read_unlock(); |
392 | |
393 | return tmp; |
394 | } |
395 | |
static inline void tipc_aead_put(struct tipc_aead *aead)
{
	if (aead && refcount_dec_and_test(&aead->refcnt))
		call_rcu(&aead->rcu, tipc_aead_free);
}
401 | |
402 | /** |
403 | * tipc_aead_free - Release AEAD key incl. all the TFMs in the list |
404 | * @rp: rcu head pointer |
405 | */ |
406 | static void tipc_aead_free(struct rcu_head *rp) |
407 | { |
408 | struct tipc_aead *aead = container_of(rp, struct tipc_aead, rcu); |
409 | struct tipc_tfm *tfm_entry, *head, *tmp; |
410 | |
	if (aead->cloned) {
		tipc_aead_put(aead->cloned);
	} else {
		head = *get_cpu_ptr(aead->tfm_entry);
		put_cpu_ptr(aead->tfm_entry);
		list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) {
			crypto_free_aead(tfm_entry->tfm);
			list_del(&tfm_entry->list);
			kfree(tfm_entry);
		}
		/* Free the head */
		crypto_free_aead(head->tfm);
		list_del(&head->list);
		kfree(head);
	}
	free_percpu(aead->tfm_entry);
	kfree_sensitive(aead->key);
	kfree(aead);
429 | } |
430 | |
431 | static int tipc_aead_users(struct tipc_aead __rcu *aead) |
432 | { |
433 | struct tipc_aead *tmp; |
434 | int users = 0; |
435 | |
436 | rcu_read_lock(); |
437 | tmp = rcu_dereference(aead); |
	if (tmp)
		users = atomic_read(&tmp->users);
440 | rcu_read_unlock(); |
441 | |
442 | return users; |
443 | } |
444 | |
445 | static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim) |
446 | { |
447 | struct tipc_aead *tmp; |
448 | |
449 | rcu_read_lock(); |
450 | tmp = rcu_dereference(aead); |
	if (tmp)
		atomic_add_unless(&tmp->users, 1, lim);
453 | rcu_read_unlock(); |
454 | } |
455 | |
456 | static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim) |
457 | { |
458 | struct tipc_aead *tmp; |
459 | |
460 | rcu_read_lock(); |
461 | tmp = rcu_dereference(aead); |
	if (tmp)
		atomic_add_unless(&tmp->users, -1, lim);
464 | rcu_read_unlock(); |
465 | } |
466 | |
467 | static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val) |
468 | { |
469 | struct tipc_aead *tmp; |
470 | int cur; |
471 | |
472 | rcu_read_lock(); |
473 | tmp = rcu_dereference(aead); |
	if (tmp) {
		do {
			cur = atomic_read(&tmp->users);
			if (cur == val)
				break;
		} while (atomic_cmpxchg(&tmp->users, cur, val) != cur);
	}
481 | rcu_read_unlock(); |
482 | } |
483 | |
484 | /** |
485 | * tipc_aead_tfm_next - Move TFM entry to the next one in list and return it |
486 | * @aead: the AEAD key pointer |
487 | */ |
488 | static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead) |
489 | { |
490 | struct tipc_tfm **tfm_entry; |
491 | struct crypto_aead *tfm; |
492 | |
493 | tfm_entry = get_cpu_ptr(aead->tfm_entry); |
494 | *tfm_entry = list_next_entry(*tfm_entry, list); |
495 | tfm = (*tfm_entry)->tfm; |
496 | put_cpu_ptr(tfm_entry); |
497 | |
498 | return tfm; |
499 | } |
500 | |
501 | /** |
502 | * tipc_aead_init - Initiate TIPC AEAD |
503 | * @aead: returned new TIPC AEAD key handle pointer |
504 | * @ukey: pointer to user key data |
505 | * @mode: the key mode |
506 | * |
 * Allocate a (list of) new cipher transformation (TFM) with the specific user
 * key data if valid. The number of TFMs to allocate can be tuned beforehand
 * via the sysctl "net/tipc/max_tfms".
 * All the other AEAD data are initialized as well.
511 | * |
512 | * Return: 0 if the initiation is successful, otherwise: < 0 |
513 | */ |
514 | static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey, |
515 | u8 mode) |
516 | { |
517 | struct tipc_tfm *tfm_entry, *head; |
518 | struct crypto_aead *tfm; |
519 | struct tipc_aead *tmp; |
520 | int keylen, err, cpu; |
521 | int tfm_cnt = 0; |
522 | |
523 | if (unlikely(*aead)) |
524 | return -EEXIST; |
525 | |
	/* Allocate a new AEAD */
	tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
	if (unlikely(!tmp))
		return -ENOMEM;

	/* The key consists of two parts: [AES-KEY][SALT] */
	keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;

	/* Allocate per-cpu TFM entry pointer */
	tmp->tfm_entry = alloc_percpu(struct tipc_tfm *);
	if (!tmp->tfm_entry) {
		kfree_sensitive(tmp);
		return -ENOMEM;
	}
540 | |
	/* Make a list of TFMs with the user key data */
	do {
		tfm = crypto_alloc_aead(ukey->alg_name, 0, 0);
		if (IS_ERR(tfm)) {
			err = PTR_ERR(tfm);
			break;
		}

		if (unlikely(!tfm_cnt &&
			     crypto_aead_ivsize(tfm) != TIPC_AES_GCM_IV_SIZE)) {
			crypto_free_aead(tfm);
			err = -ENOTSUPP;
			break;
		}

		err = crypto_aead_setauthsize(tfm, TIPC_AES_GCM_TAG_SIZE);
		err |= crypto_aead_setkey(tfm, ukey->key, keylen);
		if (unlikely(err)) {
			crypto_free_aead(tfm);
			break;
		}

		tfm_entry = kmalloc(sizeof(*tfm_entry), GFP_KERNEL);
		if (unlikely(!tfm_entry)) {
			crypto_free_aead(tfm);
			err = -ENOMEM;
			break;
		}
		INIT_LIST_HEAD(&tfm_entry->list);
		tfm_entry->tfm = tfm;

		/* First entry? */
		if (!tfm_cnt) {
			head = tfm_entry;
			for_each_possible_cpu(cpu) {
				*per_cpu_ptr(tmp->tfm_entry, cpu) = head;
			}
		} else {
			list_add_tail(&tfm_entry->list, &head->list);
		}

	} while (++tfm_cnt < sysctl_tipc_max_tfms);
583 | |
	/* No TFM allocated? */
	if (!tfm_cnt) {
		free_percpu(tmp->tfm_entry);
		kfree_sensitive(tmp);
		return err;
	}

	/* Form a hex string of some last bytes as the key's hint */
	bin2hex(tmp->hint, ukey->key + keylen - TIPC_AEAD_HINT_LEN,
		TIPC_AEAD_HINT_LEN);

	/* Initialize the other data */
	tmp->mode = mode;
	tmp->cloned = NULL;
	tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
	tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL);
	if (!tmp->key) {
		tipc_aead_free(&tmp->rcu);
		return -ENOMEM;
	}
	memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
	atomic_set(&tmp->users, 0);
	atomic64_set(&tmp->seqno, 0);
	refcount_set(&tmp->refcnt, 1);
608 | |
609 | *aead = tmp; |
610 | return 0; |
611 | } |
612 | |
613 | /** |
614 | * tipc_aead_clone - Clone a TIPC AEAD key |
615 | * @dst: dest key for the cloning |
616 | * @src: source key to clone from |
617 | * |
 * Make a "copy" of the source AEAD key data to the dest; the TFM list is
 * shared between the keys.
 * A reference to the source is held in the "cloned" pointer for later
 * freeing purposes.
622 | * |
623 | * Note: this must be done in cluster-key mode only! |
624 | * Return: 0 in case of success, otherwise < 0 |
625 | */ |
626 | static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src) |
627 | { |
628 | struct tipc_aead *aead; |
629 | int cpu; |
630 | |
631 | if (!src) |
632 | return -ENOKEY; |
633 | |
634 | if (src->mode != CLUSTER_KEY) |
635 | return -EINVAL; |
636 | |
637 | if (unlikely(*dst)) |
638 | return -EEXIST; |
639 | |
	aead = kzalloc(sizeof(*aead), GFP_ATOMIC);
	if (unlikely(!aead))
		return -ENOMEM;

	aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC);
	if (unlikely(!aead->tfm_entry)) {
		kfree_sensitive(aead);
		return -ENOMEM;
	}
649 | |
650 | for_each_possible_cpu(cpu) { |
651 | *per_cpu_ptr(aead->tfm_entry, cpu) = |
652 | *per_cpu_ptr(src->tfm_entry, cpu); |
653 | } |
654 | |
655 | memcpy(aead->hint, src->hint, sizeof(src->hint)); |
656 | aead->mode = src->mode; |
657 | aead->salt = src->salt; |
658 | aead->authsize = src->authsize; |
	atomic_set(&aead->users, 0);
	atomic64_set(&aead->seqno, 0);
	refcount_set(&aead->refcnt, 1);
662 | |
663 | WARN_ON(!refcount_inc_not_zero(&src->refcnt)); |
664 | aead->cloned = src; |
665 | |
666 | *dst = aead; |
667 | return 0; |
668 | } |
669 | |
670 | /** |
671 | * tipc_aead_mem_alloc - Allocate memory for AEAD request operations |
672 | * @tfm: cipher handle to be registered with the request |
673 | * @crypto_ctx_size: size of crypto context for callback |
674 | * @iv: returned pointer to IV data |
675 | * @req: returned pointer to AEAD request data |
676 | * @sg: returned pointer to SG lists |
677 | * @nsg: number of SG lists to be allocated |
678 | * |
679 | * Allocate memory to store the crypto context data, AEAD request, IV and SG |
680 | * lists, the memory layout is as follows: |
681 | * crypto_ctx || iv || aead_req || sg[] |
682 | * |
683 | * Return: the pointer to the memory areas in case of success, otherwise NULL |
684 | */ |
685 | static void *tipc_aead_mem_alloc(struct crypto_aead *tfm, |
686 | unsigned int crypto_ctx_size, |
687 | u8 **iv, struct aead_request **req, |
688 | struct scatterlist **sg, int nsg) |
689 | { |
690 | unsigned int iv_size, req_size; |
691 | unsigned int len; |
692 | u8 *mem; |
693 | |
694 | iv_size = crypto_aead_ivsize(tfm); |
695 | req_size = sizeof(**req) + crypto_aead_reqsize(tfm); |
696 | |
697 | len = crypto_ctx_size; |
698 | len += iv_size; |
699 | len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1); |
700 | len = ALIGN(len, crypto_tfm_ctx_alignment()); |
701 | len += req_size; |
702 | len = ALIGN(len, __alignof__(struct scatterlist)); |
703 | len += nsg * sizeof(**sg); |
704 | |
	mem = kmalloc(len, GFP_ATOMIC);
706 | if (!mem) |
707 | return NULL; |
708 | |
709 | *iv = (u8 *)PTR_ALIGN(mem + crypto_ctx_size, |
710 | crypto_aead_alignmask(tfm) + 1); |
711 | *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size, |
712 | crypto_tfm_ctx_alignment()); |
713 | *sg = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size, |
714 | __alignof__(struct scatterlist)); |
715 | |
716 | return (void *)mem; |
717 | } |
718 | |
719 | /** |
720 | * tipc_aead_encrypt - Encrypt a message |
721 | * @aead: TIPC AEAD key for the message encryption |
722 | * @skb: the input/output skb |
723 | * @b: TIPC bearer where the message will be delivered after the encryption |
724 | * @dst: the destination media address |
725 | * @__dnode: TIPC dest node if "known" |
726 | * |
727 | * Return: |
728 | * * 0 : if the encryption has completed |
729 | * * -EINPROGRESS/-EBUSY : if a callback will be performed |
730 | * * < 0 : the encryption has failed |
731 | */ |
732 | static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb, |
733 | struct tipc_bearer *b, |
734 | struct tipc_media_addr *dst, |
735 | struct tipc_node *__dnode) |
736 | { |
737 | struct crypto_aead *tfm = tipc_aead_tfm_next(aead); |
738 | struct tipc_crypto_tx_ctx *tx_ctx; |
739 | struct aead_request *req; |
740 | struct sk_buff *trailer; |
741 | struct scatterlist *sg; |
742 | struct tipc_ehdr *ehdr; |
743 | int ehsz, len, tailen, nsg, rc; |
744 | void *ctx; |
745 | u32 salt; |
746 | u8 *iv; |
747 | |
748 | /* Make sure message len at least 4-byte aligned */ |
749 | len = ALIGN(skb->len, 4); |
750 | tailen = len - skb->len + aead->authsize; |
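	/* e.g. a 61-byte message is padded to len = 64, then the 16-byte GCM
	 * tag is appended, giving tailen = 3 + 16 = 19
	 */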
751 | |
	/* Expand skb tail for authentication tag:
	 * As a simplification, we expect the skb to have been allocated with
	 * enough tailroom for the authentication tag already. Even when the
	 * skb is nonlinear but has no frag_list, it should still be fine!
	 * Otherwise, we must cow it to be a writable buffer with the tailroom.
	 */
758 | SKB_LINEAR_ASSERT(skb); |
759 | if (tailen > skb_tailroom(skb)) { |
760 | pr_debug("TX(): skb tailroom is not enough: %d, requires: %d\n" , |
761 | skb_tailroom(skb), tailen); |
762 | } |
763 | |
	nsg = skb_cow_data(skb, tailen, &trailer);
	if (unlikely(nsg < 0)) {
		pr_err("TX: skb_cow_data() returned %d\n", nsg);
		return nsg;
	}

	pskb_put(skb, trailer, tailen);

	/* Allocate memory for the AEAD operation */
	ctx = tipc_aead_mem_alloc(tfm, sizeof(*tx_ctx), &iv, &req, &sg, nsg);
	if (unlikely(!ctx))
		return -ENOMEM;
	TIPC_SKB_CB(skb)->crypto_ctx = ctx;

	/* Map skb to the sg lists */
	sg_init_table(sg, nsg);
	rc = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(rc < 0)) {
		pr_err("TX: skb_to_sgvec() returned %d, nsg %d!\n", rc, nsg);
		goto exit;
	}
785 | |
786 | /* Prepare IV: [SALT (4 octets)][SEQNO (8 octets)] |
787 | * In case we're in cluster-key mode, SALT is varied by xor-ing with |
788 | * the source address (or w0 of id), otherwise with the dest address |
789 | * if dest is known. |
790 | */ |
	ehdr = (struct tipc_ehdr *)skb->data;
	salt = aead->salt;
	if (aead->mode == CLUSTER_KEY)
		salt ^= __be32_to_cpu(ehdr->addr);
	else if (__dnode)
		salt ^= tipc_node_get_addr(__dnode);
	memcpy(iv, &salt, 4);
	memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);
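	/* The IV is thus [SALT (4 octets) | SEQNO (8 octets)], i.e. the
	 * 12-octet GCM IV whose size was verified at key init time
	 */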
799 | |
800 | /* Prepare request */ |
801 | ehsz = tipc_ehdr_size(ehdr); |
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ehsz);
	aead_request_set_crypt(req, sg, sg, len - ehsz, iv);

	/* Set callback function & data */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tipc_aead_encrypt_done, skb);
	tx_ctx = (struct tipc_crypto_tx_ctx *)ctx;
	tx_ctx->aead = aead;
	tx_ctx->bearer = b;
	memcpy(&tx_ctx->dst, dst, sizeof(*dst));
813 | |
814 | /* Hold bearer */ |
815 | if (unlikely(!tipc_bearer_hold(b))) { |
816 | rc = -ENODEV; |
817 | goto exit; |
818 | } |
819 | |
820 | /* Now, do encrypt */ |
821 | rc = crypto_aead_encrypt(req); |
822 | if (rc == -EINPROGRESS || rc == -EBUSY) |
823 | return rc; |
824 | |
825 | tipc_bearer_put(b); |
826 | |
827 | exit: |
	kfree(ctx);
829 | TIPC_SKB_CB(skb)->crypto_ctx = NULL; |
830 | return rc; |
831 | } |
832 | |
833 | static void tipc_aead_encrypt_done(void *data, int err) |
834 | { |
835 | struct sk_buff *skb = data; |
836 | struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx; |
837 | struct tipc_bearer *b = tx_ctx->bearer; |
838 | struct tipc_aead *aead = tx_ctx->aead; |
839 | struct tipc_crypto *tx = aead->crypto; |
840 | struct net *net = tx->net; |
841 | |
842 | switch (err) { |
843 | case 0: |
844 | this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]); |
845 | rcu_read_lock(); |
846 | if (likely(test_bit(0, &b->up))) |
847 | b->media->send_msg(net, skb, b, &tx_ctx->dst); |
848 | else |
849 | kfree_skb(skb); |
850 | rcu_read_unlock(); |
851 | break; |
852 | case -EINPROGRESS: |
853 | return; |
854 | default: |
855 | this_cpu_inc(tx->stats->stat[STAT_ASYNC_NOK]); |
856 | kfree_skb(skb); |
857 | break; |
858 | } |
859 | |
	kfree(tx_ctx);
861 | tipc_bearer_put(b); |
862 | tipc_aead_put(aead); |
863 | } |
864 | |
865 | /** |
866 | * tipc_aead_decrypt - Decrypt an encrypted message |
867 | * @net: struct net |
868 | * @aead: TIPC AEAD for the message decryption |
869 | * @skb: the input/output skb |
870 | * @b: TIPC bearer where the message has been received |
871 | * |
872 | * Return: |
873 | * * 0 : if the decryption has completed |
874 | * * -EINPROGRESS/-EBUSY : if a callback will be performed |
875 | * * < 0 : the decryption has failed |
876 | */ |
877 | static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead, |
878 | struct sk_buff *skb, struct tipc_bearer *b) |
879 | { |
880 | struct tipc_crypto_rx_ctx *rx_ctx; |
881 | struct aead_request *req; |
882 | struct crypto_aead *tfm; |
883 | struct sk_buff *unused; |
884 | struct scatterlist *sg; |
885 | struct tipc_ehdr *ehdr; |
886 | int ehsz, nsg, rc; |
887 | void *ctx; |
888 | u32 salt; |
889 | u8 *iv; |
890 | |
891 | if (unlikely(!aead)) |
892 | return -ENOKEY; |
893 | |
	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		pr_err("RX: skb_cow_data() returned %d\n", nsg);
		return nsg;
	}

	/* Allocate memory for the AEAD operation */
	tfm = tipc_aead_tfm_next(aead);
	ctx = tipc_aead_mem_alloc(tfm, sizeof(*rx_ctx), &iv, &req, &sg, nsg);
	if (unlikely(!ctx))
		return -ENOMEM;
	TIPC_SKB_CB(skb)->crypto_ctx = ctx;

	/* Map skb to the sg lists */
	sg_init_table(sg, nsg);
	rc = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(rc < 0)) {
		pr_err("RX: skb_to_sgvec() returned %d, nsg %d\n", rc, nsg);
		goto exit;
	}
914 | |
	/* Reconstruct IV: */
	ehdr = (struct tipc_ehdr *)skb->data;
	salt = aead->salt;
	if (aead->mode == CLUSTER_KEY)
		salt ^= __be32_to_cpu(ehdr->addr);
	else if (ehdr->destined)
		salt ^= tipc_own_addr(net);
	memcpy(iv, &salt, 4);
	memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);

	/* Prepare request */
	ehsz = tipc_ehdr_size(ehdr);
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ehsz);
	aead_request_set_crypt(req, sg, sg, skb->len - ehsz, iv);

	/* Set callback function & data */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tipc_aead_decrypt_done, skb);
934 | rx_ctx = (struct tipc_crypto_rx_ctx *)ctx; |
935 | rx_ctx->aead = aead; |
936 | rx_ctx->bearer = b; |
937 | |
938 | /* Hold bearer */ |
939 | if (unlikely(!tipc_bearer_hold(b))) { |
940 | rc = -ENODEV; |
941 | goto exit; |
942 | } |
943 | |
944 | /* Now, do decrypt */ |
945 | rc = crypto_aead_decrypt(req); |
946 | if (rc == -EINPROGRESS || rc == -EBUSY) |
947 | return rc; |
948 | |
949 | tipc_bearer_put(b); |
950 | |
951 | exit: |
	kfree(ctx);
953 | TIPC_SKB_CB(skb)->crypto_ctx = NULL; |
954 | return rc; |
955 | } |
956 | |
957 | static void tipc_aead_decrypt_done(void *data, int err) |
958 | { |
959 | struct sk_buff *skb = data; |
960 | struct tipc_crypto_rx_ctx *rx_ctx = TIPC_SKB_CB(skb)->crypto_ctx; |
961 | struct tipc_bearer *b = rx_ctx->bearer; |
962 | struct tipc_aead *aead = rx_ctx->aead; |
963 | struct tipc_crypto_stats __percpu *stats = aead->crypto->stats; |
964 | struct net *net = aead->crypto->net; |
965 | |
966 | switch (err) { |
967 | case 0: |
968 | this_cpu_inc(stats->stat[STAT_ASYNC_OK]); |
969 | break; |
970 | case -EINPROGRESS: |
971 | return; |
972 | default: |
973 | this_cpu_inc(stats->stat[STAT_ASYNC_NOK]); |
974 | break; |
975 | } |
976 | |
	kfree(rx_ctx);
	tipc_crypto_rcv_complete(net, aead, b, &skb, err);
979 | if (likely(skb)) { |
980 | if (likely(test_bit(0, &b->up))) |
981 | tipc_rcv(net, skb, b); |
982 | else |
983 | kfree_skb(skb); |
984 | } |
985 | |
986 | tipc_bearer_put(b); |
987 | } |
988 | |
989 | static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr) |
990 | { |
991 | return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE; |
992 | } |
993 | |
994 | /** |
995 | * tipc_ehdr_validate - Validate an encryption message |
996 | * @skb: the message buffer |
997 | * |
998 | * Return: "true" if this is a valid encryption message, otherwise "false" |
999 | */ |
1000 | bool tipc_ehdr_validate(struct sk_buff *skb) |
1001 | { |
1002 | struct tipc_ehdr *ehdr; |
1003 | int ehsz; |
1004 | |
1005 | if (unlikely(!pskb_may_pull(skb, EHDR_MIN_SIZE))) |
1006 | return false; |
1007 | |
1008 | ehdr = (struct tipc_ehdr *)skb->data; |
1009 | if (unlikely(ehdr->version != TIPC_EVERSION)) |
1010 | return false; |
1011 | ehsz = tipc_ehdr_size(ehdr); |
1012 | if (unlikely(!pskb_may_pull(skb, ehsz))) |
1013 | return false; |
1014 | if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE)) |
1015 | return false; |
1016 | |
1017 | return true; |
1018 | } |
1019 | |
1020 | /** |
1021 | * tipc_ehdr_build - Build TIPC encryption message header |
1022 | * @net: struct net |
1023 | * @aead: TX AEAD key to be used for the message encryption |
1024 | * @tx_key: key id used for the message encryption |
1025 | * @skb: input/output message skb |
1026 | * @__rx: RX crypto handle if dest is "known" |
1027 | * |
1028 | * Return: the header size if the building is successful, otherwise < 0 |
1029 | */ |
1030 | static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead, |
1031 | u8 tx_key, struct sk_buff *skb, |
1032 | struct tipc_crypto *__rx) |
1033 | { |
1034 | struct tipc_msg *hdr = buf_msg(skb); |
1035 | struct tipc_ehdr *ehdr; |
	u32 user = msg_user(hdr);
1037 | u64 seqno; |
1038 | int ehsz; |
1039 | |
1040 | /* Make room for encryption header */ |
1041 | ehsz = (user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE; |
1042 | WARN_ON(skb_headroom(skb) < ehsz); |
	ehdr = (struct tipc_ehdr *)skb_push(skb, ehsz);
1044 | |
	/* Obtain a seqno first:
	 * Use the key seqno (= cluster-wide) if dest is unknown or we're in
	 * cluster key mode, otherwise a per-peer seqno is preferable!
	 */
1049 | if (!__rx || aead->mode == CLUSTER_KEY) |
		seqno = atomic64_inc_return(&aead->seqno);
	else
		seqno = atomic64_inc_return(&__rx->sndnxt);
1053 | |
1054 | /* Revoke the key if seqno is wrapped around */ |
1055 | if (unlikely(!seqno)) |
1056 | return tipc_crypto_key_revoke(net, tx_key); |
1057 | |
1058 | /* Word 1-2 */ |
1059 | ehdr->seqno = cpu_to_be64(seqno); |
1060 | |
1061 | /* Words 0, 3- */ |
1062 | ehdr->version = TIPC_EVERSION; |
1063 | ehdr->user = 0; |
1064 | ehdr->keepalive = 0; |
1065 | ehdr->tx_key = tx_key; |
1066 | ehdr->destined = (__rx) ? 1 : 0; |
1067 | ehdr->rx_key_active = (__rx) ? __rx->key.active : 0; |
1068 | ehdr->rx_nokey = (__rx) ? __rx->nokey : 0; |
1069 | ehdr->master_key = aead->crypto->key_master; |
1070 | ehdr->reserved_1 = 0; |
1071 | ehdr->reserved_2 = 0; |
1072 | |
1073 | switch (user) { |
1074 | case LINK_CONFIG: |
1075 | ehdr->user = LINK_CONFIG; |
1076 | memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN); |
1077 | break; |
1078 | default: |
		if (user == LINK_PROTOCOL && msg_type(hdr) == STATE_MSG) {
			ehdr->user = LINK_PROTOCOL;
			ehdr->keepalive = msg_is_keepalive(hdr);
1082 | } |
1083 | ehdr->addr = hdr->hdr[3]; |
1084 | break; |
1085 | } |
1086 | |
1087 | return ehsz; |
1088 | } |
1089 | |
1090 | static inline void tipc_crypto_key_set_state(struct tipc_crypto *c, |
1091 | u8 new_passive, |
1092 | u8 new_active, |
1093 | u8 new_pending) |
1094 | { |
1095 | struct tipc_key old = c->key; |
1096 | char buf[32]; |
1097 | |
1098 | c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) | |
1099 | ((new_active & KEY_MASK) << (KEY_BITS)) | |
1100 | ((new_pending & KEY_MASK)); |
1101 | |
1102 | pr_debug("%s: key changing %s ::%pS\n" , c->name, |
1103 | tipc_key_change_dump(old, c->key, buf), |
1104 | __builtin_return_address(0)); |
1105 | } |
1106 | |
1107 | /** |
1108 | * tipc_crypto_key_init - Initiate a new user / AEAD key |
1109 | * @c: TIPC crypto to which new key is attached |
1110 | * @ukey: the user key |
1111 | * @mode: the key mode (CLUSTER_KEY or PER_NODE_KEY) |
1112 | * @master_key: specify this is a cluster master key |
1113 | * |
1114 | * A new TIPC AEAD key will be allocated and initiated with the specified user |
1115 | * key, then attached to the TIPC crypto. |
1116 | * |
1117 | * Return: new key id in case of success, otherwise: < 0 |
1118 | */ |
1119 | int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey, |
1120 | u8 mode, bool master_key) |
1121 | { |
1122 | struct tipc_aead *aead = NULL; |
1123 | int rc = 0; |
1124 | |
	/* Initiate with the new user key */
	rc = tipc_aead_init(&aead, ukey, mode);

	/* Attach it to the crypto */
	if (likely(!rc)) {
		rc = tipc_crypto_key_attach(c, aead, 0, master_key);
		if (rc < 0)
			tipc_aead_free(&aead->rcu);
	}
1134 | |
1135 | return rc; |
1136 | } |
1137 | |
1138 | /** |
1139 | * tipc_crypto_key_attach - Attach a new AEAD key to TIPC crypto |
1140 | * @c: TIPC crypto to which the new AEAD key is attached |
1141 | * @aead: the new AEAD key pointer |
1142 | * @pos: desired slot in the crypto key array, = 0 if any! |
1143 | * @master_key: specify this is a cluster master key |
1144 | * |
1145 | * Return: new key id in case of success, otherwise: -EBUSY |
1146 | */ |
1147 | static int tipc_crypto_key_attach(struct tipc_crypto *c, |
1148 | struct tipc_aead *aead, u8 pos, |
1149 | bool master_key) |
1150 | { |
1151 | struct tipc_key key; |
1152 | int rc = -EBUSY; |
1153 | u8 new_key; |
1154 | |
	spin_lock_bh(&c->lock);
1156 | key = c->key; |
1157 | if (master_key) { |
1158 | new_key = KEY_MASTER; |
1159 | goto attach; |
1160 | } |
1161 | if (key.active && key.passive) |
1162 | goto exit; |
1163 | if (key.pending) { |
		if (tipc_aead_users(c->aead[key.pending]) > 0)
1165 | goto exit; |
1166 | /* if (pos): ok with replacing, will be aligned when needed */ |
1167 | /* Replace it */ |
1168 | new_key = key.pending; |
1169 | } else { |
1170 | if (pos) { |
1171 | if (key.active && pos != key_next(key.active)) { |
1172 | key.passive = pos; |
1173 | new_key = pos; |
1174 | goto attach; |
1175 | } else if (!key.active && !key.passive) { |
1176 | key.pending = pos; |
1177 | new_key = pos; |
1178 | goto attach; |
1179 | } |
1180 | } |
1181 | key.pending = key_next(key.active ?: key.passive); |
1182 | new_key = key.pending; |
1183 | } |
1184 | |
attach:
	aead->crypto = c;
	aead->gen = (is_tx(c)) ? ++c->key_gen : c->key_gen;
	tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock);
	if (likely(c->key.keys != key.keys))
		tipc_crypto_key_set_state(c, key.passive, key.active,
					  key.pending);
	c->working = 1;
	c->nokey = 0;
	c->key_master |= master_key;
	rc = new_key;

exit:
	spin_unlock_bh(&c->lock);
	return rc;
1200 | } |
1201 | |
1202 | void tipc_crypto_key_flush(struct tipc_crypto *c) |
1203 | { |
1204 | struct tipc_crypto *tx, *rx; |
1205 | int k; |
1206 | |
	spin_lock_bh(&c->lock);
	if (is_rx(c)) {
		/* Try to cancel pending work */
		rx = c;
		tx = tipc_net(rx->net)->crypto_tx;
		if (cancel_delayed_work(&rx->work)) {
			kfree(rx->skey);
			rx->skey = NULL;
			atomic_xchg(&rx->key_distr, 0);
			tipc_node_put(rx->node);
		}
		/* RX stopping => decrease TX key users if any */
		k = atomic_xchg(&rx->peer_rx_active, 0);
		if (k) {
			tipc_aead_users_dec(tx->aead[k], 0);
			/* Mark the point TX key users changed */
			tx->timer1 = jiffies;
		}
	}

	c->flags = 0;
	tipc_crypto_key_set_state(c, 0, 0, 0);
	for (k = KEY_MIN; k <= KEY_MAX; k++)
		tipc_crypto_key_detach(c->aead[k], &c->lock);
	atomic64_set(&c->sndnxt, 0);
	spin_unlock_bh(&c->lock);
1233 | } |
1234 | |
1235 | /** |
1236 | * tipc_crypto_key_try_align - Align RX keys if possible |
1237 | * @rx: RX crypto handle |
1238 | * @new_pending: new pending slot if aligned (= TX key from peer) |
1239 | * |
 * Peer has used an unknown key slot, this only happens when peer has left and
 * rejoined, or we are a newcomer.
1242 | * That means, there must be no active key but a pending key at unaligned slot. |
1243 | * If so, we try to move the pending key to the new slot. |
1244 | * Note: A potential passive key can exist, it will be shifted correspondingly! |
1245 | * |
1246 | * Return: "true" if key is successfully aligned, otherwise "false" |
1247 | */ |
1248 | static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending) |
1249 | { |
1250 | struct tipc_aead *tmp1, *tmp2 = NULL; |
1251 | struct tipc_key key; |
1252 | bool aligned = false; |
1253 | u8 new_passive = 0; |
1254 | int x; |
1255 | |
	spin_lock(&rx->lock);
1257 | key = rx->key; |
1258 | if (key.pending == new_pending) { |
1259 | aligned = true; |
1260 | goto exit; |
1261 | } |
1262 | if (key.active) |
1263 | goto exit; |
1264 | if (!key.pending) |
1265 | goto exit; |
	if (tipc_aead_users(rx->aead[key.pending]) > 0)
1267 | goto exit; |
1268 | |
1269 | /* Try to "isolate" this pending key first */ |
1270 | tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock); |
	if (!refcount_dec_if_one(&tmp1->refcnt))
1272 | goto exit; |
1273 | rcu_assign_pointer(rx->aead[key.pending], NULL); |
1274 | |
1275 | /* Move passive key if any */ |
1276 | if (key.passive) { |
1277 | tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2, lockdep_is_held(&rx->lock)); |
1278 | x = (key.passive - key.pending + new_pending) % KEY_MAX; |
1279 | new_passive = (x <= 0) ? x + KEY_MAX : x; |
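		/* e.g. pending moving 2 -> 1 shifts a passive key in slot 3
		 * to slot (3 - 2 + 1) % 3 = 2
		 */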
1280 | } |
1281 | |
	/* Re-allocate the key(s) */
	tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
	rcu_assign_pointer(rx->aead[new_pending], tmp1);
	if (new_passive)
		rcu_assign_pointer(rx->aead[new_passive], tmp2);
	refcount_set(&tmp1->refcnt, 1);
	aligned = true;
	pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending,
			    new_pending);
1291 | |
1292 | exit: |
	spin_unlock(&rx->lock);
1294 | return aligned; |
1295 | } |
1296 | |
1297 | /** |
1298 | * tipc_crypto_key_pick_tx - Pick one TX key for message decryption |
1299 | * @tx: TX crypto handle |
1300 | * @rx: RX crypto handle (can be NULL) |
1301 | * @skb: the message skb which will be decrypted later |
1302 | * @tx_key: peer TX key id |
1303 | * |
1304 | * This function looks up the existing TX keys and pick one which is suitable |
1305 | * for the message decryption, that must be a cluster key and not used before |
1306 | * on the same message (i.e. recursive). |
1307 | * |
1308 | * Return: the TX AEAD key handle in case of success, otherwise NULL |
1309 | */ |
1310 | static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx, |
1311 | struct tipc_crypto *rx, |
1312 | struct sk_buff *skb, |
1313 | u8 tx_key) |
1314 | { |
1315 | struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(skb); |
1316 | struct tipc_aead *aead = NULL; |
1317 | struct tipc_key key = tx->key; |
1318 | u8 k, i = 0; |
1319 | |
1320 | /* Initialize data if not yet */ |
1321 | if (!skb_cb->tx_clone_deferred) { |
1322 | skb_cb->tx_clone_deferred = 1; |
1323 | memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx)); |
1324 | } |
1325 | |
1326 | skb_cb->tx_clone_ctx.rx = rx; |
1327 | if (++skb_cb->tx_clone_ctx.recurs > 2) |
1328 | return NULL; |
1329 | |
1330 | /* Pick one TX key */ |
	spin_lock(&tx->lock);
1332 | if (tx_key == KEY_MASTER) { |
1333 | aead = tipc_aead_rcu_ptr(tx->aead[KEY_MASTER], &tx->lock); |
1334 | goto done; |
1335 | } |
1336 | do { |
1337 | k = (i == 0) ? key.pending : |
1338 | ((i == 1) ? key.active : key.passive); |
1339 | if (!k) |
1340 | continue; |
1341 | aead = tipc_aead_rcu_ptr(tx->aead[k], &tx->lock); |
1342 | if (!aead) |
1343 | continue; |
1344 | if (aead->mode != CLUSTER_KEY || |
1345 | aead == skb_cb->tx_clone_ctx.last) { |
1346 | aead = NULL; |
1347 | continue; |
1348 | } |
1349 | /* Ok, found one cluster key */ |
1350 | skb_cb->tx_clone_ctx.last = aead; |
1351 | WARN_ON(skb->next); |
1352 | skb->next = skb_clone(skb, GFP_ATOMIC); |
1353 | if (unlikely(!skb->next)) |
1354 | pr_warn("Failed to clone skb for next round if any\n" ); |
1355 | break; |
1356 | } while (++i < 3); |
1357 | |
1358 | done: |
1359 | if (likely(aead)) |
1360 | WARN_ON(!refcount_inc_not_zero(&aead->refcnt)); |
	spin_unlock(&tx->lock);
1362 | |
1363 | return aead; |
1364 | } |
1365 | |
/**
 * tipc_crypto_key_synch - Sync own key data according to peer key status
 * @rx: RX crypto handle
 * @skb: TIPCv2 message buffer (incl. the ehdr from peer)
 *
 * This function updates the peer node related data as the peer RX active key
 * has changed, so the numbers of TX key users on this node are increased and
 * decreased correspondingly.
1374 | * |
1375 | * It also considers if peer has no key, then we need to make own master key |
1376 | * (if any) taking over i.e. starting grace period and also trigger key |
1377 | * distributing process. |
1378 | * |
1379 | * The "per-peer" sndnxt is also reset when the peer key has switched. |
1380 | */ |
1381 | static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb) |
1382 | { |
	struct tipc_ehdr *ehdr = (struct tipc_ehdr *)skb_network_header(skb);
	struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
	struct tipc_msg *hdr = buf_msg(skb);
	u32 self = tipc_own_addr(rx->net);
1387 | u8 cur, new; |
1388 | unsigned long delay; |
1389 | |
1390 | /* Update RX 'key_master' flag according to peer, also mark "legacy" if |
1391 | * a peer has no master key. |
1392 | */ |
1393 | rx->key_master = ehdr->master_key; |
1394 | if (!rx->key_master) |
1395 | tx->legacy_user = 1; |
1396 | |
1397 | /* For later cases, apply only if message is destined to this node */ |
	if (!ehdr->destined || msg_short(hdr) || msg_destnode(hdr) != self)
1399 | return; |
1400 | |
1401 | /* Case 1: Peer has no keys, let's make master key take over */ |
1402 | if (ehdr->rx_nokey) { |
1403 | /* Set or extend grace period */ |
1404 | tx->timer2 = jiffies; |
1405 | /* Schedule key distributing for the peer if not yet */ |
		if (tx->key.keys &&
		    !atomic_cmpxchg(&rx->key_distr, 0, KEY_DISTR_SCHED)) {
			get_random_bytes(&delay, 2);
			delay %= 5;
			delay = msecs_to_jiffies(500 * ++delay);
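			/* i.e. a random delay of 0.5, 1.0, 1.5, 2.0 or 2.5
			 * seconds before the key is distributed to this peer
			 */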
			if (queue_delayed_work(tx->wq, &rx->work, delay))
				tipc_node_get(rx->node);
		}
	} else {
		/* Cancel a pending key distributing if any */
		atomic_xchg(&rx->key_distr, 0);
1417 | } |
1418 | |
1419 | /* Case 2: Peer RX active key has changed, let's update own TX users */ |
	cur = atomic_read(&rx->peer_rx_active);
	new = ehdr->rx_key_active;
	if (tx->key.keys &&
	    cur != new &&
	    atomic_cmpxchg(&rx->peer_rx_active, cur, new) == cur) {
		if (new)
			tipc_aead_users_inc(tx->aead[new], INT_MAX);
		if (cur)
			tipc_aead_users_dec(tx->aead[cur], 0);

		atomic64_set(&rx->sndnxt, 0);
		/* Mark the point TX key users changed */
		tx->timer1 = jiffies;

		pr_debug("%s: key users changed %d-- %d++, peer %s\n",
			 tx->name, cur, new, rx->name);
1436 | } |
1437 | } |
1438 | |
1439 | static int tipc_crypto_key_revoke(struct net *net, u8 tx_key) |
1440 | { |
1441 | struct tipc_crypto *tx = tipc_net(net)->crypto_tx; |
1442 | struct tipc_key key; |
1443 | |
	spin_lock_bh(&tx->lock);
	key = tx->key;
	WARN_ON(!key.active || tx_key != key.active);

	/* Free the active key */
	tipc_crypto_key_set_state(tx, key.passive, 0, key.pending);
	tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
	spin_unlock_bh(&tx->lock);

	pr_warn("%s: key is revoked\n", tx->name);
1454 | return -EKEYREVOKED; |
1455 | } |
1456 | |
1457 | int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net, |
1458 | struct tipc_node *node) |
1459 | { |
1460 | struct tipc_crypto *c; |
1461 | |
1462 | if (*crypto) |
1463 | return -EEXIST; |
1464 | |
	/* Allocate crypto */
	c = kzalloc(sizeof(*c), GFP_ATOMIC);
	if (!c)
		return -ENOMEM;

	/* Allocate workqueue on TX */
	if (!node) {
		c->wq = alloc_ordered_workqueue("tipc_crypto", 0);
		if (!c->wq) {
			kfree(c);
			return -ENOMEM;
		}
1477 | } |
1478 | |
1479 | /* Allocate statistic structure */ |
1480 | c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC); |
1481 | if (!c->stats) { |
		if (c->wq)
			destroy_workqueue(c->wq);
		kfree_sensitive(c);
1485 | return -ENOMEM; |
1486 | } |
1487 | |
1488 | c->flags = 0; |
1489 | c->net = net; |
1490 | c->node = node; |
	get_random_bytes(&c->key_gen, 2);
	tipc_crypto_key_set_state(c, 0, 0, 0);
	atomic_set(&c->key_distr, 0);
	atomic_set(&c->peer_rx_active, 0);
	atomic64_set(&c->sndnxt, 0);
	c->timer1 = jiffies;
	c->timer2 = jiffies;
	c->rekeying_intv = TIPC_REKEYING_INTV_DEF;
	spin_lock_init(&c->lock);
	scnprintf(c->name, 48, "%s(%s)", (is_rx(c)) ? "RX" : "TX",
		  (is_rx(c)) ? tipc_node_get_id_str(c->node) :
			       tipc_own_id_string(c->net));
1503 | |
1504 | if (is_rx(c)) |
1505 | INIT_DELAYED_WORK(&c->work, tipc_crypto_work_rx); |
1506 | else |
1507 | INIT_DELAYED_WORK(&c->work, tipc_crypto_work_tx); |
1508 | |
1509 | *crypto = c; |
1510 | return 0; |
1511 | } |
1512 | |
1513 | void tipc_crypto_stop(struct tipc_crypto **crypto) |
1514 | { |
1515 | struct tipc_crypto *c = *crypto; |
1516 | u8 k; |
1517 | |
1518 | if (!c) |
1519 | return; |
1520 | |
	/* Flush any queued works & destroy wq */
	if (is_tx(c)) {
		c->rekeying_intv = 0;
		cancel_delayed_work_sync(&c->work);
		destroy_workqueue(c->wq);
	}

	/* Release AEAD keys */
	rcu_read_lock();
	for (k = KEY_MIN; k <= KEY_MAX; k++)
		tipc_aead_put(rcu_dereference(c->aead[k]));
	rcu_read_unlock();
	pr_debug("%s: has been stopped\n", c->name);

	/* Free this crypto statistics */
	free_percpu(c->stats);

	*crypto = NULL;
	kfree_sensitive(c);
1540 | } |
1541 | |
1542 | void tipc_crypto_timeout(struct tipc_crypto *rx) |
1543 | { |
	struct tipc_net *tn = tipc_net(rx->net);
	struct tipc_crypto *tx = tn->crypto_tx;
	struct tipc_key key;
	int cmd;

	/* TX pending: taking all users & stable -> active */
	spin_lock(&tx->lock);
	key = tx->key;
	if (key.active && tipc_aead_users(tx->aead[key.active]) > 0)
		goto s1;
	if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0)
		goto s1;
	if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_TIME))
		goto s1;

	tipc_crypto_key_set_state(tx, key.passive, key.pending, 0);
	if (key.active)
		tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
	this_cpu_inc(tx->stats->stat[STAT_SWITCHES]);
	pr_info("%s: key[%d] is activated\n", tx->name, key.pending);
1564 | |
1565 | s1: |
	spin_unlock(&tx->lock);

	/* RX pending: having user -> active */
	spin_lock(&rx->lock);
	key = rx->key;
	if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0)
		goto s2;

	if (key.active)
		key.passive = key.active;
	key.active = key.pending;
	rx->timer2 = jiffies;
	tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
	this_cpu_inc(rx->stats->stat[STAT_SWITCHES]);
	pr_info("%s: key[%d] is activated\n", rx->name, key.pending);
1581 | goto s5; |
1582 | |
1583 | s2: |
1584 | /* RX pending: not working -> remove */ |
	if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -10)
		goto s3;

	tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
	tipc_crypto_key_detach(rx->aead[key.pending], &rx->lock);
	pr_debug("%s: key[%d] is removed\n", rx->name, key.pending);
1591 | goto s5; |
1592 | |
1593 | s3: |
1594 | /* RX active: timed out or no user -> pending */ |
1595 | if (!key.active) |
1596 | goto s4; |
	if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM) &&
	    tipc_aead_users(rx->aead[key.active]) > 0)
		goto s4;

	if (key.pending)
		key.passive = key.active;
	else
		key.pending = key.active;
	rx->timer2 = jiffies;
	tipc_crypto_key_set_state(rx, key.passive, 0, key.pending);
	tipc_aead_users_set(rx->aead[key.pending], 0);
	pr_debug("%s: key[%d] is deactivated\n", rx->name, key.active);
1609 | goto s5; |
1610 | |
1611 | s4: |
1612 | /* RX passive: outdated or not working -> free */ |
1613 | if (!key.passive) |
1614 | goto s5; |
	if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM) &&
	    tipc_aead_users(rx->aead[key.passive]) > -10)
		goto s5;

	tipc_crypto_key_set_state(rx, 0, key.active, key.pending);
	tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock);
	pr_debug("%s: key[%d] is freed\n", rx->name, key.passive);

s5:
	spin_unlock(&rx->lock);
1625 | |
	/* Relax the legacy_user flag here; it will be set again if really
	 * needed, but only once we are out of the grace period, for safety!
	 */
1629 | if (time_after(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) |
1630 | tx->legacy_user = 0; |
1631 | |
1632 | /* Limit max_tfms & do debug commands if needed */ |
1633 | if (likely(sysctl_tipc_max_tfms <= TIPC_MAX_TFMS_LIM)) |
1634 | return; |
1635 | |
1636 | cmd = sysctl_tipc_max_tfms; |
1637 | sysctl_tipc_max_tfms = TIPC_MAX_TFMS_DEF; |
	tipc_crypto_do_cmd(rx->net, cmd);
1639 | } |
1640 | |
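/**
 * tipc_crypto_clone_msg - Encrypt & xmit a clone of the given message
 * @net: struct net
 * @_skb: the message to be cloned
 * @b: bearer for xmit
 * @dst: destination media address
 * @__dnode: destination node for reference if any
 * @type: xmit type to be tagged on the clone (e.g. SKB_PROBING, SKB_GRACING)
 *
 * The clone is tagged with @type, then encrypted via tipc_crypto_xmit() and,
 * if the encryption succeeds, sent directly on the bearer. The original
 * message is left untouched for the caller.
 */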
1641 | static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb, |
1642 | struct tipc_bearer *b, |
1643 | struct tipc_media_addr *dst, |
1644 | struct tipc_node *__dnode, u8 type) |
1645 | { |
1646 | struct sk_buff *skb; |
1647 | |
	skb = skb_clone(_skb, GFP_ATOMIC);
1649 | if (skb) { |
1650 | TIPC_SKB_CB(skb)->xmit_type = type; |
		tipc_crypto_xmit(net, &skb, b, dst, __dnode);
1652 | if (skb) |
1653 | b->media->send_msg(net, skb, b, dst); |
1654 | } |
1655 | } |
1656 | |
1657 | /** |
1658 | * tipc_crypto_xmit - Build & encrypt TIPC message for xmit |
1659 | * @net: struct net |
1660 | * @skb: input/output message skb pointer |
1661 | * @b: bearer used for xmit later |
1662 | * @dst: destination media address |
1663 | * @__dnode: destination node for reference if any |
1664 | * |
1665 | * First, build an encryption message header on the top of the message, then |
1666 | * encrypt the original TIPC message by using the pending, master or active |
 * key, in that order of preference.
1668 | * If the encryption is successful, the encrypted skb is returned directly or |
1669 | * via the callback. |
1670 | * Otherwise, the skb is freed! |
1671 | * |
1672 | * Return: |
1673 | * * 0 : the encryption has succeeded (or no encryption) |
1674 | * * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made |
 * * -ENOKEY : the encryption has failed due to no key
1676 | * * -EKEYREVOKED : the encryption has failed due to key revoked |
1677 | * * -ENOMEM : the encryption has failed due to no memory |
1678 | * * < 0 : the encryption has failed due to other reasons |
1679 | */ |
1680 | int tipc_crypto_xmit(struct net *net, struct sk_buff **skb, |
1681 | struct tipc_bearer *b, struct tipc_media_addr *dst, |
1682 | struct tipc_node *__dnode) |
1683 | { |
	struct tipc_crypto *__rx = tipc_node_crypto_rx(__dnode);
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_crypto_stats __percpu *stats = tx->stats;
	struct tipc_msg *hdr = buf_msg(*skb);
	struct tipc_key key = tx->key;
	struct tipc_aead *aead = NULL;
	u32 user = msg_user(hdr);
	u32 type = msg_type(hdr);
1692 | int rc = -ENOKEY; |
1693 | u8 tx_key = 0; |
1694 | |
1695 | /* No encryption? */ |
1696 | if (!tx->working) |
1697 | return 0; |
1698 | |
1699 | /* Pending key if peer has active on it or probing time */ |
1700 | if (unlikely(key.pending)) { |
1701 | tx_key = key.pending; |
1702 | if (!tx->key_master && !key.active) |
1703 | goto encrypt; |
1704 | if (__rx && atomic_read(v: &__rx->peer_rx_active) == tx_key) |
1705 | goto encrypt; |
1706 | if (TIPC_SKB_CB(*skb)->xmit_type == SKB_PROBING) { |
1707 | pr_debug("%s: probing for key[%d]\n" , tx->name, |
1708 | key.pending); |
1709 | goto encrypt; |
1710 | } |
1711 | if (user == LINK_CONFIG || user == LINK_PROTOCOL) |
1712 | tipc_crypto_clone_msg(net, skb: *skb, b, dst, __dnode, |
1713 | SKB_PROBING); |
1714 | } |
1715 | |
1716 | /* Master key if this is a *vital* message or in grace period */ |
1717 | if (tx->key_master) { |
1718 | tx_key = KEY_MASTER; |
1719 | if (!key.active) |
1720 | goto encrypt; |
1721 | if (TIPC_SKB_CB(*skb)->xmit_type == SKB_GRACING) { |
1722 | pr_debug("%s: gracing for msg (%d %d)\n" , tx->name, |
1723 | user, type); |
1724 | goto encrypt; |
1725 | } |
1726 | if (user == LINK_CONFIG || |
1727 | (user == LINK_PROTOCOL && type == RESET_MSG) || |
1728 | (user == MSG_CRYPTO && type == KEY_DISTR_MSG) || |
1729 | time_before(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) { |
1730 | if (__rx && __rx->key_master && |
			    !atomic_read(&__rx->peer_rx_active))
1732 | goto encrypt; |
1733 | if (!__rx) { |
1734 | if (likely(!tx->legacy_user)) |
1735 | goto encrypt; |
				tipc_crypto_clone_msg(net, *skb, b, dst,
1737 | __dnode, SKB_GRACING); |
1738 | } |
1739 | } |
1740 | } |
1741 | |
1742 | /* Else, use the active key if any */ |
1743 | if (likely(key.active)) { |
1744 | tx_key = key.active; |
1745 | goto encrypt; |
1746 | } |
1747 | |
1748 | goto exit; |
1749 | |
1750 | encrypt: |
	aead = tipc_aead_get(tx->aead[tx_key]);
	if (unlikely(!aead))
		goto exit;
	rc = tipc_ehdr_build(net, aead, tx_key, *skb, __rx);
	if (likely(rc > 0))
		rc = tipc_aead_encrypt(aead, *skb, b, dst, __dnode);
1757 | |
1758 | exit: |
1759 | switch (rc) { |
1760 | case 0: |
1761 | this_cpu_inc(stats->stat[STAT_OK]); |
1762 | break; |
1763 | case -EINPROGRESS: |
1764 | case -EBUSY: |
1765 | this_cpu_inc(stats->stat[STAT_ASYNC]); |
1766 | *skb = NULL; |
1767 | return rc; |
1768 | default: |
1769 | this_cpu_inc(stats->stat[STAT_NOK]); |
1770 | if (rc == -ENOKEY) |
1771 | this_cpu_inc(stats->stat[STAT_NOKEYS]); |
1772 | else if (rc == -EKEYREVOKED) |
1773 | this_cpu_inc(stats->stat[STAT_BADKEYS]); |
		kfree_skb(*skb);
1775 | *skb = NULL; |
1776 | break; |
1777 | } |
1778 | |
1779 | tipc_aead_put(aead); |
1780 | return rc; |
1781 | } |
1782 | |
1783 | /** |
1784 | * tipc_crypto_rcv - Decrypt an encrypted TIPC message from peer |
1785 | * @net: struct net |
1786 | * @rx: RX crypto handle |
1787 | * @skb: input/output message skb pointer |
1788 | * @b: bearer where the message has been received |
1789 | * |
1790 | * If the decryption is successful, the decrypted skb is returned directly or |
 * via the callback; the encryption header and auth tag will be trimmed out
1792 | * before forwarding to tipc_rcv() via the tipc_crypto_rcv_complete(). |
1793 | * Otherwise, the skb will be freed! |
 * Note: RX key(s) can be re-aligned, or in case no suitable key is found, TX
 * cluster key(s) can be taken for the decryption (recursively).
1796 | * |
1797 | * Return: |
1798 | * * 0 : the decryption has successfully completed |
1799 | * * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made |
1800 | * * -ENOKEY : the decryption has failed due to no key |
1801 | * * -EBADMSG : the decryption has failed due to bad message |
1802 | * * -ENOMEM : the decryption has failed due to no memory |
1803 | * * < 0 : the decryption has failed due to other reasons |
1804 | */ |
1805 | int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx, |
1806 | struct sk_buff **skb, struct tipc_bearer *b) |
1807 | { |
1808 | struct tipc_crypto *tx = tipc_net(net)->crypto_tx; |
1809 | struct tipc_crypto_stats __percpu *stats; |
1810 | struct tipc_aead *aead = NULL; |
1811 | struct tipc_key key; |
1812 | int rc = -ENOKEY; |
1813 | u8 tx_key, n; |
1814 | |
1815 | tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key; |
1816 | |
1817 | /* New peer? |
1818 | * Let's try with TX key (i.e. cluster mode) & verify the skb first! |
1819 | */ |
1820 | if (unlikely(!rx || tx_key == KEY_MASTER)) |
1821 | goto pick_tx; |
1822 | |
1823 | /* Pick RX key according to TX key if any */ |
1824 | key = rx->key; |
1825 | if (tx_key == key.active || tx_key == key.pending || |
1826 | tx_key == key.passive) |
1827 | goto decrypt; |
1828 | |
1829 | /* Unknown key, let's try to align RX key(s) */ |
	if (tipc_crypto_key_try_align(rx, tx_key))
1831 | goto decrypt; |
1832 | |
1833 | pick_tx: |
1834 | /* No key suitable? Try to pick one from TX... */ |
	aead = tipc_crypto_key_pick_tx(tx, rx, *skb, tx_key);
1836 | if (aead) |
1837 | goto decrypt; |
1838 | goto exit; |
1839 | |
1840 | decrypt: |
1841 | rcu_read_lock(); |
1842 | if (!aead) |
		aead = tipc_aead_get(rx->aead[tx_key]);
	rc = tipc_aead_decrypt(net, aead, *skb, b);
1845 | rcu_read_unlock(); |
1846 | |
1847 | exit: |
1848 | stats = ((rx) ?: tx)->stats; |
1849 | switch (rc) { |
1850 | case 0: |
1851 | this_cpu_inc(stats->stat[STAT_OK]); |
1852 | break; |
1853 | case -EINPROGRESS: |
1854 | case -EBUSY: |
1855 | this_cpu_inc(stats->stat[STAT_ASYNC]); |
1856 | *skb = NULL; |
1857 | return rc; |
1858 | default: |
1859 | this_cpu_inc(stats->stat[STAT_NOK]); |
1860 | if (rc == -ENOKEY) { |
			kfree_skb(*skb);
1862 | *skb = NULL; |
1863 | if (rx) { |
				/* Mark rx->nokey only if we don't have a
				 * pending received session key, nor a newer
				 * one, i.e. in the next slot.
				 */
1868 | n = key_next(tx_key); |
1869 | rx->nokey = !(rx->skey || |
1870 | rcu_access_pointer(rx->aead[n])); |
1871 | pr_debug_ratelimited("%s: nokey %d, key %d/%x\n" , |
1872 | rx->name, rx->nokey, |
1873 | tx_key, rx->key.keys); |
1874 | tipc_node_put(node: rx->node); |
1875 | } |
1876 | this_cpu_inc(stats->stat[STAT_NOKEYS]); |
1877 | return rc; |
1878 | } else if (rc == -EBADMSG) { |
1879 | this_cpu_inc(stats->stat[STAT_BADMSGS]); |
1880 | } |
1881 | break; |
1882 | } |
1883 | |
	tipc_crypto_rcv_complete(net, aead, b, skb, rc);
1885 | return rc; |
1886 | } |
1887 | |
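/**
 * tipc_crypto_rcv_complete - Finalize a message decryption
 * @net: struct net
 * @aead: the AEAD key that has been used for the decryption
 * @b: bearer where the message has been received
 * @skb: input/output message skb pointer
 * @err: result of the decryption (0 on success)
 *
 * If the decryption was completed with a TX i.e. cluster key (a new peer),
 * the used key is cloned and attached to the peer RX crypto first. On
 * success, the encryption header and authentication tag are stripped off,
 * the inner TIPC message is validated and the own key states are synched
 * according to the peer's. On failure, the skb is freed and the key's user
 * counter is decreased.
 */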
1888 | static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead, |
1889 | struct tipc_bearer *b, |
1890 | struct sk_buff **skb, int err) |
1891 | { |
1892 | struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(*skb); |
1893 | struct tipc_crypto *rx = aead->crypto; |
1894 | struct tipc_aead *tmp = NULL; |
1895 | struct tipc_ehdr *ehdr; |
1896 | struct tipc_node *n; |
1897 | |
1898 | /* Is this completed by TX? */ |
1899 | if (unlikely(is_tx(aead->crypto))) { |
1900 | rx = skb_cb->tx_clone_ctx.rx; |
1901 | pr_debug("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n" , |
1902 | (rx) ? tipc_node_get_id_str(rx->node) : "-" , err, aead, |
1903 | (*skb)->next, skb_cb->flags); |
1904 | pr_debug("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n" , |
1905 | skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last, |
1906 | aead->crypto->aead[1], aead->crypto->aead[2], |
1907 | aead->crypto->aead[3]); |
1908 | if (unlikely(err)) { |
1909 | if (err == -EBADMSG && (*skb)->next) |
				tipc_rcv(net, (*skb)->next, b);
1911 | goto free_skb; |
1912 | } |
1913 | |
1914 | if (likely((*skb)->next)) { |
			kfree_skb((*skb)->next);
1916 | (*skb)->next = NULL; |
1917 | } |
1918 | ehdr = (struct tipc_ehdr *)(*skb)->data; |
1919 | if (!rx) { |
1920 | WARN_ON(ehdr->user != LINK_CONFIG); |
			n = tipc_node_create(net, 0, ehdr->id, 0xffffu, 0,
					     true);
			rx = tipc_node_crypto_rx(n);
1924 | if (unlikely(!rx)) |
1925 | goto free_skb; |
1926 | } |
1927 | |
1928 | /* Ignore cloning if it was TX master key */ |
1929 | if (ehdr->tx_key == KEY_MASTER) |
1930 | goto rcv; |
		if (tipc_aead_clone(&tmp, aead) < 0)
1932 | goto rcv; |
1933 | WARN_ON(!refcount_inc_not_zero(&tmp->refcnt)); |
		if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) {
			tipc_aead_free(&tmp->rcu);
1936 | goto rcv; |
1937 | } |
1938 | tipc_aead_put(aead); |
1939 | aead = tmp; |
1940 | } |
1941 | |
1942 | if (unlikely(err)) { |
		tipc_aead_users_dec((struct tipc_aead __force __rcu *)aead, INT_MIN);
1944 | goto free_skb; |
1945 | } |
1946 | |
1947 | /* Set the RX key's user */ |
	tipc_aead_users_set((struct tipc_aead __force __rcu *)aead, 1);
1949 | |
1950 | /* Mark this point, RX works */ |
1951 | rx->timer1 = jiffies; |
1952 | |
1953 | rcv: |
1954 | /* Remove ehdr & auth. tag prior to tipc_rcv() */ |
1955 | ehdr = (struct tipc_ehdr *)(*skb)->data; |
1956 | |
1957 | /* Mark this point, RX passive still works */ |
1958 | if (rx->key.passive && ehdr->tx_key == rx->key.passive) |
1959 | rx->timer2 = jiffies; |
1960 | |
	skb_reset_network_header(*skb);
	skb_pull(*skb, tipc_ehdr_size(ehdr));
	if (pskb_trim(*skb, (*skb)->len - aead->authsize))
1964 | goto free_skb; |
1965 | |
1966 | /* Validate TIPCv2 message */ |
1967 | if (unlikely(!tipc_msg_validate(skb))) { |
1968 | pr_err_ratelimited("Packet dropped after decryption!\n" ); |
1969 | goto free_skb; |
1970 | } |
1971 | |
1972 | /* Ok, everything's fine, try to synch own keys according to peers' */ |
	tipc_crypto_key_synch(rx, *skb);
1974 | |
1975 | /* Re-fetch skb cb as skb might be changed in tipc_msg_validate */ |
1976 | skb_cb = TIPC_SKB_CB(*skb); |
1977 | |
1978 | /* Mark skb decrypted */ |
1979 | skb_cb->decrypted = 1; |
1980 | |
1981 | /* Clear clone cxt if any */ |
1982 | if (likely(!skb_cb->tx_clone_deferred)) |
1983 | goto exit; |
1984 | skb_cb->tx_clone_deferred = 0; |
1985 | memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx)); |
1986 | goto exit; |
1987 | |
1988 | free_skb: |
	kfree_skb(*skb);
1990 | *skb = NULL; |
1991 | |
1992 | exit: |
1993 | tipc_aead_put(aead); |
1994 | if (rx) |
		tipc_node_put(rx->node);
1996 | } |
1997 | |
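/**
 * tipc_crypto_do_cmd - Execute a debug command on the cryptos
 * @net: struct net
 * @cmd: the command number (currently only 0xfff1 is supported)
 *
 * Command 0xfff1 dumps the key status and the per-CPU crypto statistics of
 * the local TX crypto as well as of every known peer's RX crypto to the
 * kernel log.
 */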
1998 | static void tipc_crypto_do_cmd(struct net *net, int cmd) |
1999 | { |
2000 | struct tipc_net *tn = tipc_net(net); |
2001 | struct tipc_crypto *tx = tn->crypto_tx, *rx; |
2002 | struct list_head *p; |
2003 | unsigned int stat; |
2004 | int i, j, cpu; |
2005 | char buf[200]; |
2006 | |
2007 | /* Currently only one command is supported */ |
2008 | switch (cmd) { |
2009 | case 0xfff1: |
2010 | goto print_stats; |
2011 | default: |
2012 | return; |
2013 | } |
2014 | |
2015 | print_stats: |
2016 | /* Print a header */ |
2017 | pr_info("\n=============== TIPC Crypto Statistics ===============\n\n" ); |
2018 | |
2019 | /* Print key status */ |
2020 | pr_info("Key status:\n" ); |
2021 | pr_info("TX(%7.7s)\n%s" , tipc_own_id_string(net), |
2022 | tipc_crypto_key_dump(tx, buf)); |
2023 | |
2024 | rcu_read_lock(); |
2025 | for (p = tn->node_list.next; p != &tn->node_list; p = p->next) { |
		rx = tipc_node_crypto_rx_by_list(p);
		pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node),
2028 | tipc_crypto_key_dump(rx, buf)); |
2029 | } |
2030 | rcu_read_unlock(); |
2031 | |
2032 | /* Print crypto statistics */ |
2033 | for (i = 0, j = 0; i < MAX_STATS; i++) |
		j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]);
	pr_info("Counter %s", buf);
2036 | |
2037 | memset(buf, '-', 115); |
2038 | buf[115] = '\0'; |
2039 | pr_info("%s\n" , buf); |
2040 | |
2041 | j = scnprintf(buf, size: 200, fmt: "TX(%7.7s) " , tipc_own_id_string(net)); |
2042 | for_each_possible_cpu(cpu) { |
2043 | for (i = 0; i < MAX_STATS; i++) { |
2044 | stat = per_cpu_ptr(tx->stats, cpu)->stat[i]; |
2045 | j += scnprintf(buf: buf + j, size: 200 - j, fmt: "|%11d " , stat); |
2046 | } |
2047 | pr_info("%s" , buf); |
2048 | j = scnprintf(buf, size: 200, fmt: "%12s" , " " ); |
2049 | } |
2050 | |
2051 | rcu_read_lock(); |
2052 | for (p = tn->node_list.next; p != &tn->node_list; p = p->next) { |
		rx = tipc_node_crypto_rx_by_list(p);
		j = scnprintf(buf, 200, "RX(%7.7s) ",
			      tipc_node_get_id_str(rx->node));
2056 | for_each_possible_cpu(cpu) { |
2057 | for (i = 0; i < MAX_STATS; i++) { |
2058 | stat = per_cpu_ptr(rx->stats, cpu)->stat[i]; |
				j += scnprintf(buf + j, 200 - j, "|%11d ",
					       stat);
			}
			pr_info("%s", buf);
			j = scnprintf(buf, 200, "%12s", " ");
2064 | } |
2065 | } |
2066 | rcu_read_unlock(); |
2067 | |
2068 | pr_info("\n======================== Done ========================\n" ); |
2069 | } |
2070 | |
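/**
 * tipc_crypto_key_dump - Dump the key status of a crypto into a buffer
 * @c: TX or RX crypto
 * @buf: the buffer for the output (at least 200 bytes)
 *
 * Each key slot is printed with its state ("ACT", "PAS", "PEN" or "-"), the
 * key hint, mode (cluster or per-node) and its users/refcnt counters. For an
 * RX crypto, the peer RX active key number is appended as well.
 *
 * Return: the dump buffer i.e. @buf
 */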
2071 | static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf) |
2072 | { |
2073 | struct tipc_key key = c->key; |
2074 | struct tipc_aead *aead; |
2075 | int k, i = 0; |
2076 | char *s; |
2077 | |
2078 | for (k = KEY_MIN; k <= KEY_MAX; k++) { |
2079 | if (k == KEY_MASTER) { |
2080 | if (is_rx(c)) |
2081 | continue; |
2082 | if (time_before(jiffies, |
2083 | c->timer2 + TIPC_TX_GRACE_PERIOD)) |
2084 | s = "ACT" ; |
2085 | else |
2086 | s = "PAS" ; |
2087 | } else { |
2088 | if (k == key.passive) |
2089 | s = "PAS" ; |
2090 | else if (k == key.active) |
2091 | s = "ACT" ; |
2092 | else if (k == key.pending) |
2093 | s = "PEN" ; |
2094 | else |
2095 | s = "-" ; |
2096 | } |
2097 | i += scnprintf(buf: buf + i, size: 200 - i, fmt: "\tKey%d: %s" , k, s); |
2098 | |
2099 | rcu_read_lock(); |
2100 | aead = rcu_dereference(c->aead[k]); |
2101 | if (aead) |
			i += scnprintf(buf + i, 200 - i,
				       "{\"0x...%s\", \"%s\"}/%d:%d",
				       aead->hint,
				       (aead->mode == CLUSTER_KEY) ? "c" : "p",
				       atomic_read(&aead->users),
				       refcount_read(&aead->refcnt));
		rcu_read_unlock();
		i += scnprintf(buf + i, 200 - i, "\n");
2110 | } |
2111 | |
2112 | if (is_rx(c)) |
2113 | i += scnprintf(buf: buf + i, size: 200 - i, fmt: "\tPeer RX active: %d\n" , |
2114 | atomic_read(v: &c->peer_rx_active)); |
2115 | |
2116 | return buf; |
2117 | } |
2118 | |
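/**
 * tipc_key_change_dump - Dump a key state change into a buffer
 * @old: the old key state
 * @new: the new key state
 * @buf: the buffer for the output (at least 32 bytes)
 *
 * The states of key 1..3 ("pas", "act", "pen" or "-") are printed for both
 * @old and @new in the form "[... ... ...] -> [... ... ...]".
 *
 * Return: the dump buffer i.e. @buf
 */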
2119 | static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new, |
2120 | char *buf) |
2121 | { |
2122 | struct tipc_key *key = &old; |
2123 | int k, i = 0; |
2124 | char *s; |
2125 | |
2126 | /* Output format: "[%s %s %s] -> [%s %s %s]", max len = 32 */ |
2127 | again: |
	i += scnprintf(buf + i, 32 - i, "[");
2129 | for (k = KEY_1; k <= KEY_3; k++) { |
2130 | if (k == key->passive) |
2131 | s = "pas" ; |
2132 | else if (k == key->active) |
2133 | s = "act" ; |
2134 | else if (k == key->pending) |
2135 | s = "pen" ; |
2136 | else |
2137 | s = "-" ; |
2138 | i += scnprintf(buf: buf + i, size: 32 - i, |
2139 | fmt: (k != KEY_3) ? "%s " : "%s" , s); |
2140 | } |
2141 | if (key != &new) { |
2142 | i += scnprintf(buf: buf + i, size: 32 - i, fmt: "] -> " ); |
2143 | key = &new; |
2144 | goto again; |
2145 | } |
2146 | i += scnprintf(buf: buf + i, size: 32 - i, fmt: "]" ); |
2147 | return buf; |
2148 | } |
2149 | |
2150 | /** |
2151 | * tipc_crypto_msg_rcv - Common 'MSG_CRYPTO' processing point |
2152 | * @net: the struct net |
2153 | * @skb: the receiving message buffer |
2154 | */ |
2155 | void tipc_crypto_msg_rcv(struct net *net, struct sk_buff *skb) |
2156 | { |
2157 | struct tipc_crypto *rx; |
2158 | struct tipc_msg *hdr; |
2159 | |
2160 | if (unlikely(skb_linearize(skb))) |
2161 | goto exit; |
2162 | |
2163 | hdr = buf_msg(skb); |
	rx = tipc_node_crypto_rx_by_addr(net, msg_prevnode(hdr));
2165 | if (unlikely(!rx)) |
2166 | goto exit; |
2167 | |
	switch (msg_type(hdr)) {
2169 | case KEY_DISTR_MSG: |
2170 | if (tipc_crypto_key_rcv(rx, hdr)) |
2171 | goto exit; |
2172 | break; |
2173 | default: |
2174 | break; |
2175 | } |
2176 | |
	tipc_node_put(rx->node);
2178 | |
2179 | exit: |
2180 | kfree_skb(skb); |
2181 | } |
2182 | |
2183 | /** |
2184 | * tipc_crypto_key_distr - Distribute a TX key |
2185 | * @tx: the TX crypto |
2186 | * @key: the key's index |
2187 | * @dest: the destination tipc node, = NULL if distributing to all nodes |
2188 | * |
2189 | * Return: 0 in case of success, otherwise < 0 |
2190 | */ |
2191 | int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key, |
2192 | struct tipc_node *dest) |
2193 | { |
2194 | struct tipc_aead *aead; |
	u32 dnode = tipc_node_get_addr(dest);
2196 | int rc = -ENOKEY; |
2197 | |
2198 | if (!sysctl_tipc_key_exchange_enabled) |
2199 | return 0; |
2200 | |
2201 | if (key) { |
2202 | rcu_read_lock(); |
		aead = tipc_aead_get(tx->aead[key]);
		if (likely(aead)) {
			rc = tipc_crypto_key_xmit(tx->net, aead->key,
						  aead->gen, aead->mode,
						  dnode);
2208 | tipc_aead_put(aead); |
2209 | } |
2210 | rcu_read_unlock(); |
2211 | } |
2212 | |
2213 | return rc; |
2214 | } |
2215 | |
2216 | /** |
2217 | * tipc_crypto_key_xmit - Send a session key |
2218 | * @net: the struct net |
2219 | * @skey: the session key to be sent |
2220 | * @gen: the key's generation |
2221 | * @mode: the key's mode |
2222 | * @dnode: the destination node address, = 0 if broadcasting to all nodes |
2223 | * |
2224 | * The session key 'skey' is packed in a TIPC v2 'MSG_CRYPTO/KEY_DISTR_MSG' |
2225 | * as its data section, then xmit-ed through the uc/bc link. |
2226 | * |
2227 | * Return: 0 in case of success, otherwise < 0 |
2228 | */ |
2229 | static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey, |
2230 | u16 gen, u8 mode, u32 dnode) |
2231 | { |
2232 | struct sk_buff_head pkts; |
2233 | struct tipc_msg *hdr; |
2234 | struct sk_buff *skb; |
2235 | u16 size, cong_link_cnt; |
2236 | u8 *data; |
2237 | int rc; |
2238 | |
	size = tipc_aead_key_size(skey);
2240 | skb = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC); |
2241 | if (!skb) |
2242 | return -ENOMEM; |
2243 | |
2244 | hdr = buf_msg(skb); |
	tipc_msg_init(tipc_own_addr(net), hdr, MSG_CRYPTO, KEY_DISTR_MSG,
		      INT_H_SIZE, dnode);
	msg_set_size(hdr, INT_H_SIZE + size);
	msg_set_key_gen(hdr, gen);
	msg_set_key_mode(hdr, mode);
2250 | |
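	/* Pack the data section: [alg_name][keylen (BE)][key bytes] */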
	data = msg_data(hdr);
2252 | *((__be32 *)(data + TIPC_AEAD_ALG_NAME)) = htonl(skey->keylen); |
2253 | memcpy(data, skey->alg_name, TIPC_AEAD_ALG_NAME); |
2254 | memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->key, |
2255 | skey->keylen); |
2256 | |
	__skb_queue_head_init(&pkts);
	__skb_queue_tail(&pkts, skb);
	if (dnode)
		rc = tipc_node_xmit(net, &pkts, dnode, 0);
	else
		rc = tipc_bcast_xmit(net, &pkts, &cong_link_cnt);
2263 | |
2264 | return rc; |
2265 | } |
2266 | |
2267 | /** |
2268 | * tipc_crypto_key_rcv - Receive a session key |
2269 | * @rx: the RX crypto |
2270 | * @hdr: the TIPC v2 message incl. the receiving session key in its data |
2271 | * |
 * This function retrieves the session key in the message from the peer, then
 * schedules an RX work to attach the key to the corresponding RX crypto.
2274 | * |
2275 | * Return: "true" if the key has been scheduled for attaching, otherwise |
2276 | * "false". |
2277 | */ |
2278 | static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr) |
2279 | { |
	struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
	struct tipc_aead_key *skey = NULL;
	u16 key_gen = msg_key_gen(hdr);
	u32 size = msg_data_sz(hdr);
	u8 *data = msg_data(hdr);
2285 | unsigned int keylen; |
2286 | |
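	/* The data section layout mirrors tipc_crypto_key_xmit():
	 * [alg_name: TIPC_AEAD_ALG_NAME bytes][keylen: __be32][key: keylen bytes]
	 */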
2287 | /* Verify whether the size can exist in the packet */ |
2288 | if (unlikely(size < sizeof(struct tipc_aead_key) + TIPC_AEAD_KEYLEN_MIN)) { |
2289 | pr_debug("%s: message data size is too small\n" , rx->name); |
2290 | goto exit; |
2291 | } |
2292 | |
2293 | keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME))); |
2294 | |
2295 | /* Verify the supplied size values */ |
2296 | if (unlikely(size != keylen + sizeof(struct tipc_aead_key) || |
2297 | keylen > TIPC_AEAD_KEY_SIZE_MAX)) { |
2298 | pr_debug("%s: invalid MSG_CRYPTO key size\n" , rx->name); |
2299 | goto exit; |
2300 | } |
2301 | |
2302 | spin_lock(lock: &rx->lock); |
2303 | if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) { |
2304 | pr_err("%s: key existed <%p>, gen %d vs %d\n" , rx->name, |
2305 | rx->skey, key_gen, rx->key_gen); |
2306 | goto exit_unlock; |
2307 | } |
2308 | |
2309 | /* Allocate memory for the key */ |
2310 | skey = kmalloc(size, GFP_ATOMIC); |
2311 | if (unlikely(!skey)) { |
2312 | pr_err("%s: unable to allocate memory for skey\n" , rx->name); |
2313 | goto exit_unlock; |
2314 | } |
2315 | |
2316 | /* Copy key from msg data */ |
2317 | skey->keylen = keylen; |
2318 | memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME); |
2319 | memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32), |
2320 | skey->keylen); |
2321 | |
2322 | rx->key_gen = key_gen; |
	rx->skey_mode = msg_key_mode(hdr);
2324 | rx->skey = skey; |
2325 | rx->nokey = 0; |
2326 | mb(); /* for nokey flag */ |
2327 | |
2328 | exit_unlock: |
2329 | spin_unlock(lock: &rx->lock); |
2330 | |
2331 | exit: |
2332 | /* Schedule the key attaching on this crypto */ |
2333 | if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0))) |
2334 | return true; |
2335 | |
2336 | return false; |
2337 | } |
2338 | |
2339 | /** |
2340 | * tipc_crypto_work_rx - Scheduled RX works handler |
2341 | * @work: the struct RX work |
2342 | * |
 * The function processes the previously scheduled works, i.e. distributing
 * the TX key or attaching a received session key to the RX crypto.
2345 | */ |
2346 | static void tipc_crypto_work_rx(struct work_struct *work) |
2347 | { |
2348 | struct delayed_work *dwork = to_delayed_work(work); |
2349 | struct tipc_crypto *rx = container_of(dwork, struct tipc_crypto, work); |
	struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
	unsigned long delay = msecs_to_jiffies(5000);
2352 | bool resched = false; |
2353 | u8 key; |
2354 | int rc; |
2355 | |
2356 | /* Case 1: Distribute TX key to peer if scheduled */ |
	if (atomic_cmpxchg(&rx->key_distr,
2358 | KEY_DISTR_SCHED, |
2359 | KEY_DISTR_COMPL) == KEY_DISTR_SCHED) { |
2360 | /* Always pick the newest one for distributing */ |
2361 | key = tx->key.pending ?: tx->key.active; |
		rc = tipc_crypto_key_distr(tx, key, rx->node);
		if (unlikely(rc))
			pr_warn("%s: unable to distr key[%d] to %s, err %d\n",
2365 | tx->name, key, tipc_node_get_id_str(rx->node), |
2366 | rc); |
2367 | |
2368 | /* Sched for key_distr releasing */ |
2369 | resched = true; |
2370 | } else { |
		atomic_cmpxchg(&rx->key_distr, KEY_DISTR_COMPL, 0);
2372 | } |
2373 | |
2374 | /* Case 2: Attach a pending received session key from peer if any */ |
2375 | if (rx->skey) { |
		rc = tipc_crypto_key_init(rx, rx->skey, rx->skey_mode, false);
		if (unlikely(rc < 0))
			pr_warn("%s: unable to attach received skey, err %d\n",
2379 | rx->name, rc); |
2380 | switch (rc) { |
2381 | case -EBUSY: |
2382 | case -ENOMEM: |
2383 | /* Resched the key attaching */ |
2384 | resched = true; |
2385 | break; |
2386 | default: |
2387 | synchronize_rcu(); |
			kfree(rx->skey);
2389 | rx->skey = NULL; |
2390 | break; |
2391 | } |
2392 | } |
2393 | |
2394 | if (resched && queue_delayed_work(wq: tx->wq, dwork: &rx->work, delay)) |
2395 | return; |
2396 | |
	tipc_node_put(rx->node);
2398 | } |
2399 | |
2400 | /** |
 * tipc_crypto_rekeying_sched - (Re)schedule rekeying w/ or w/o a new interval
2402 | * @tx: TX crypto |
2403 | * @changed: if the rekeying needs to be rescheduled with new interval |
2404 | * @new_intv: new rekeying interval (when "changed" = true) |
2405 | */ |
2406 | void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed, |
2407 | u32 new_intv) |
2408 | { |
2409 | unsigned long delay; |
2410 | bool now = false; |
2411 | |
2412 | if (changed) { |
2413 | if (new_intv == TIPC_REKEYING_NOW) |
2414 | now = true; |
2415 | else |
2416 | tx->rekeying_intv = new_intv; |
		cancel_delayed_work_sync(&tx->work);
2418 | } |
2419 | |
2420 | if (tx->rekeying_intv || now) { |
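		/* "rekeying_intv" is in minutes, convert to msecs for the work */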
2421 | delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000; |
		queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay));
2423 | } |
2424 | } |
2425 | |
2426 | /** |
2427 | * tipc_crypto_work_tx - Scheduled TX works handler |
2428 | * @work: the struct TX work |
2429 | * |
 * The function processes the previously scheduled work, i.e. key rekeying, by
2431 | * generating a new session key based on current one, then attaching it to the |
2432 | * TX crypto and finally distributing it to peers. It also re-schedules the |
2433 | * rekeying if needed. |
2434 | */ |
2435 | static void tipc_crypto_work_tx(struct work_struct *work) |
2436 | { |
2437 | struct delayed_work *dwork = to_delayed_work(work); |
2438 | struct tipc_crypto *tx = container_of(dwork, struct tipc_crypto, work); |
2439 | struct tipc_aead_key *skey = NULL; |
2440 | struct tipc_key key = tx->key; |
2441 | struct tipc_aead *aead; |
2442 | int rc = -ENOMEM; |
2443 | |
2444 | if (unlikely(key.pending)) |
2445 | goto resched; |
2446 | |
2447 | /* Take current key as a template */ |
2448 | rcu_read_lock(); |
2449 | aead = rcu_dereference(tx->aead[key.active ?: KEY_MASTER]); |
2450 | if (unlikely(!aead)) { |
2451 | rcu_read_unlock(); |
2452 | /* At least one key should exist for securing */ |
2453 | return; |
2454 | } |
2455 | |
	/* Let's duplicate it first */
	skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC);
2458 | rcu_read_unlock(); |
2459 | |
2460 | /* Now, generate new key, initiate & distribute it */ |
2461 | if (likely(skey)) { |
2462 | rc = tipc_aead_key_generate(skey) ?: |
		     tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false);
2464 | if (likely(rc > 0)) |
			rc = tipc_crypto_key_distr(tx, rc, NULL);
		kfree_sensitive(skey);
2467 | } |
2468 | |
2469 | if (unlikely(rc)) |
2470 | pr_warn_ratelimited("%s: rekeying returns %d\n" , tx->name, rc); |
2471 | |
2472 | resched: |
2473 | /* Re-schedule rekeying if any */ |
	tipc_crypto_rekeying_sched(tx, false, 0);
2475 | } |
2476 | |