// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "peer.h"
#include "device.h"
#include "queueing.h"
#include "timers.h"
#include "peerlookup.h"
#include "noise.h"

#include <linux/kref.h>
#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

static struct kmem_cache *peer_cache;
static atomic64_t peer_counter = ATOMIC64_INIT(0);

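/* Create and initialize a peer for the device, keyed by its public key.
 * Requires the device_update_lock. On success the new peer is linked into
 * the device's peer list and pubkey hashtable, holding one reference;
 * otherwise an ERR_PTR is returned, either because allocation failed or
 * because the per-device peer limit was reached.
 */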
struct wg_peer *wg_peer_create(struct wg_device *wg,
			       const u8 public_key[NOISE_PUBLIC_KEY_LEN],
			       const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN])
{
	struct wg_peer *peer;
	int ret = -ENOMEM;

	lockdep_assert_held(&wg->device_update_lock);

	if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
		return ERR_PTR(ret);

	peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL);
	if (unlikely(!peer))
		return ERR_PTR(ret);
	if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)))
		goto err;

	peer->device = wg;
	wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
				public_key, preshared_key, peer);
	peer->internal_id = atomic64_inc_return(&peer_counter);
	peer->serial_work_cpu = nr_cpumask_bits;
	wg_cookie_init(&peer->latest_cookie);
	wg_timers_init(peer);
	wg_cookie_checker_precompute_peer_keys(peer);
	spin_lock_init(&peer->keypairs.keypair_update_lock);
	INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker);
	INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker);
	wg_prev_queue_init(&peer->tx_queue);
	wg_prev_queue_init(&peer->rx_queue);
	rwlock_init(&peer->endpoint_lock);
	kref_init(&peer->refcount);
	skb_queue_head_init(&peer->staged_packet_queue);
	wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
	set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
	netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll);
	napi_enable(&peer->napi);
	list_add_tail(&peer->peer_list, &wg->peer_list);
	INIT_LIST_HEAD(&peer->allowedips_list);
	wg_pubkey_hashtable_add(wg->peer_hashtable, peer);
	++wg->num_peers;
	pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
	return peer;

err:
	kmem_cache_free(peer_cache, peer);
	return ERR_PTR(ret);
}

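/* Take a reference on a peer whose refcount may already have dropped to
 * zero. Must be called under the RCU-bh read lock; returns NULL if the
 * peer is NULL or already on its way to destruction.
 */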
struct wg_peer *wg_peer_get_maybe_zero(struct wg_peer *peer)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
			 "Taking peer reference without holding the RCU read lock");
	if (unlikely(!peer || !kref_get_unless_zero(&peer->refcount)))
		return NULL;
	return peer;
}

static void peer_make_dead(struct wg_peer *peer)
{
	/* Remove from configuration-time lookup structures. */
	list_del_init(&peer->peer_list);
	wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer,
				     &peer->device->device_update_lock);
	wg_pubkey_hashtable_remove(peer->device->peer_hashtable, peer);

	/* Mark as dead, so that we don't allow jumping contexts after. */
	WRITE_ONCE(peer->is_dead, true);

	/* The caller must now synchronize_net() for this to take effect. */
}

static void peer_remove_after_dead(struct wg_peer *peer)
{
	WARN_ON(!peer->is_dead);

	/* No more keypairs can be created for this peer, since is_dead protects
	 * add_new_keypair, so we can now destroy existing ones.
	 */
	wg_noise_keypairs_clear(&peer->keypairs);

	/* Destroy all ongoing timers that were in-flight at the beginning of
	 * this function.
	 */
	wg_timers_stop(peer);

	/* The transition between packet encryption/decryption queues isn't
	 * guarded by is_dead, but each reference's life is strictly bounded by
	 * two generations: once for parallel crypto and once for serial
	 * ingestion, so we can simply flush twice, and be sure that we no
	 * longer have references inside these queues.
	 */

	/* a) For encrypt/decrypt. */
	flush_workqueue(peer->device->packet_crypt_wq);
	/* b.1) For send (but not receive, since that's napi). */
	flush_workqueue(peer->device->packet_crypt_wq);
	/* b.2.1) For receive (but not send, since that's wq). */
	napi_disable(&peer->napi);
	/* b.2.2) It's now safe to remove the napi struct, which must be done
	 * here from process context.
	 */
	netif_napi_del(&peer->napi);

	/* Ensure any workstructs we own (like transmit_handshake_work or
	 * clear_peer_work) no longer are in use.
	 */
	flush_workqueue(peer->device->handshake_send_wq);

	/* After the above flushes, a peer might still be active in a few
	 * different contexts: 1) from xmit(), before hitting is_dead and
	 * returning, 2) from wg_packet_consume_data(), before hitting is_dead
	 * and returning, 3) from wg_receive_handshake_packet() after a point
	 * where it has processed an incoming handshake packet, but where
	 * all calls to pass it off to timers fail because of is_dead. We won't
	 * have new references in (1) eventually, because we're removed from
	 * allowedips; we won't have new references in (2) eventually, because
	 * wg_index_hashtable_lookup will always return NULL, since we removed
	 * all existing keypairs and no more can be created; we won't have new
	 * references in (3) eventually, because we're removed from the pubkey
	 * hash table, which allows for a maximum of one handshake response,
	 * via the still-uncleared index hashtable entry, but not more than one,
	 * and in wg_cookie_message_consume, the lookup eventually gets a peer
	 * with a refcount of zero, so no new reference is taken.
	 */

	--peer->device->num_peers;
	wg_peer_put(peer);
}

/* We have a separate "remove" function to make sure that all active places
 * where a peer is currently operating will eventually come to an end and not
 * pass their reference onto another context.
 */
void wg_peer_remove(struct wg_peer *peer)
{
	if (unlikely(!peer))
		return;
	lockdep_assert_held(&peer->device->device_update_lock);

	peer_make_dead(peer);
	synchronize_net();
	peer_remove_after_dead(peer);
}

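/* Remove every peer on the device at once, paying for only a single RCU
 * grace period (one synchronize_net() call) rather than one per peer.
 */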
void wg_peer_remove_all(struct wg_device *wg)
{
	struct wg_peer *peer, *temp;
	LIST_HEAD(dead_peers);

	lockdep_assert_held(&wg->device_update_lock);

	/* Avoid having to traverse individually for each one. */
	wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock);

	list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
		peer_make_dead(peer);
		list_add_tail(&peer->peer_list, &dead_peers);
	}
	synchronize_net();
	list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
		peer_remove_after_dead(peer);
}

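/* Final teardown, deferred by call_rcu() so that no RCU reader can still
 * hold a pointer to the peer when its memory is zeroed and freed.
 */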
static void rcu_release(struct rcu_head *rcu)
{
	struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);

	dst_cache_destroy(&peer->endpoint_cache);
	WARN_ON(wg_prev_queue_peek(&peer->tx_queue) ||
		wg_prev_queue_peek(&peer->rx_queue));

	/* The final zeroing takes care of clearing any remaining handshake key
	 * material and other potentially sensitive information.
	 */
	memzero_explicit(peer, sizeof(*peer));
	kmem_cache_free(peer_cache, peer);
}

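/* Called by kref_put() once the last reference is dropped: unhook the peer
 * from the runtime lookup structures and hand the memory over to RCU.
 */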
static void kref_release(struct kref *refcount)
{
	struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount);

	pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n",
		 peer->device->dev->name, peer->internal_id,
		 &peer->endpoint.addr);

	/* Remove ourself from dynamic runtime lookup structures, now that the
	 * last reference is gone.
	 */
	wg_index_hashtable_remove(peer->device->index_hashtable,
				  &peer->handshake.entry);

	/* Remove any lingering packets that didn't have a chance to be
	 * transmitted.
	 */
	wg_packet_purge_staged_packets(peer);

	/* Free the memory used. */
	call_rcu(&peer->rcu, rcu_release);
}

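/* Drop a reference; a NULL peer is tolerated so callers need not check. */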
void wg_peer_put(struct wg_peer *peer)
{
	if (unlikely(!peer))
		return;
	kref_put(&peer->refcount, kref_release);
}

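/* The slab cache backing all peer allocations, created at module init and
 * destroyed again at module unload.
 */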
int __init wg_peer_init(void)
{
	peer_cache = KMEM_CACHE(wg_peer, 0);
	return peer_cache ? 0 : -ENOMEM;
}

void wg_peer_uninit(void)
{
	kmem_cache_destroy(peer_cache);
}