// SPDX-License-Identifier: GPL-2.0-only
/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>

static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
static struct virtio_transport virtio_transport; /* forward declaration */

struct virtio_vsock {
	struct virtio_device *vdev;
	struct virtqueue *vqs[VSOCK_VQ_MAX];

	/* Virtqueue processing is deferred to a workqueue */
	struct work_struct tx_work;
	struct work_struct rx_work;
	struct work_struct event_work;

	/* The following fields are protected by tx_lock. vqs[VSOCK_VQ_TX]
	 * must be accessed with tx_lock held.
	 */
	struct mutex tx_lock;
	bool tx_run;

	struct work_struct send_pkt_work;
	struct sk_buff_head send_pkt_queue;

	atomic_t queued_replies;

	/* The following fields are protected by rx_lock. vqs[VSOCK_VQ_RX]
	 * must be accessed with rx_lock held.
	 */
	struct mutex rx_lock;
	bool rx_run;
	int rx_buf_nr;
	int rx_buf_max_nr;

	/* The following fields are protected by event_lock.
	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
	 */
	struct mutex event_lock;
	bool event_run;
	struct virtio_vsock_event event_list[8];

	u32 guest_cid;
	bool seqpacket_allow;

	/* These fields are used only on the tx path, in
	 * 'virtio_transport_send_pkt_work()'; they live here to save
	 * stack space in that function. Each pointer in 'out_sgs'
	 * points to the corresponding element of 'out_bufs' - this is
	 * initialized in 'virtio_vsock_probe()'. Both fields are
	 * protected by 'tx_lock'. +1 is needed for the packet header.
	 */
	struct scatterlist *out_sgs[MAX_SKB_FRAGS + 1];
	struct scatterlist out_bufs[MAX_SKB_FRAGS + 1];
};

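/* Return the CID assigned to this guest, or VMADDR_CID_ANY if the
 * device has not been probed yet (or has been removed).
 */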
static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock;
	u32 ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = VMADDR_CID_ANY;
		goto out_rcu;
	}

	ret = vsock->guest_cid;
out_rcu:
	rcu_read_unlock();
	return ret;
}

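/* Worker that moves packets from send_pkt_queue into the tx virtqueue.
 * Each skb becomes one sg entry for the header plus one per data
 * buffer (or per page fragment for nonlinear skbs). If the virtqueue
 * fills up, the skb is put back at the head of the queue and the tx
 * done worker reschedules us once buffers are reclaimed.
 */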
static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;
	bool added = false;
	bool restart_rx = false;

	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		int ret, in_sg = 0, out_sg = 0;
		struct scatterlist **sgs;
		struct sk_buff *skb;
		bool reply;

		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
		if (!skb)
			break;

		reply = virtio_vsock_skb_reply(skb);
		sgs = vsock->out_sgs;
		sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
			    sizeof(*virtio_vsock_hdr(skb)));
		out_sg++;

		if (!skb_is_nonlinear(skb)) {
			if (skb->len > 0) {
				sg_init_one(sgs[out_sg], skb->data, skb->len);
				out_sg++;
			}
		} else {
			struct skb_shared_info *si;
			int i;

			/* If the skb is nonlinear, then its buffer must contain
			 * only the header and nothing more. The data is stored
			 * in the page fragments.
			 */
			WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));

			si = skb_shinfo(skb);

			for (i = 0; i < si->nr_frags; i++) {
				skb_frag_t *skb_frag = &si->frags[i];
				void *va;

				/* We will use 'page_to_virt()' for the userspace page
				 * here, because virtio or dma-mapping layers will call
				 * 'virt_to_phys()' later to fill the buffer descriptor.
				 * We don't touch the memory at the "virtual" address
				 * of this page.
				 */
				va = page_to_virt(skb_frag_page(skb_frag));
				sg_init_one(sgs[out_sg],
					    va + skb_frag_off(skb_frag),
					    skb_frag_size(skb_frag));
				out_sg++;
			}
		}

		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
		/* Usually this means that there is no more space available in
		 * the vq
		 */
		if (ret < 0) {
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			break;
		}

		virtio_transport_deliver_tap_pkt(skb);

		if (reply) {
			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we now have resources to resume rx processing? */
			if (val + 1 == virtqueue_get_vring_size(rx_vq))
				restart_rx = true;
		}

		added = true;
	}

	if (added)
		virtqueue_kick(vq);

out:
	mutex_unlock(&vsock->tx_lock);

	if (restart_rx)
		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

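/* Queue an skb for transmission and schedule the send worker. Returns
 * the number of queued bytes on success, or -ENODEV if the device is
 * gone or the packet is addressed to our own CID (this transport does
 * not loop packets back).
 */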
static int
virtio_transport_send_pkt(struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr;
	struct virtio_vsock *vsock;
	int len = skb->len;

	hdr = virtio_vsock_hdr(skb);

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (virtio_vsock_skb_reply(skb))
		atomic_inc(&vsock->queued_replies);

	virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);

out_rcu:
	rcu_read_unlock();
	return len;
}

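/* Purge all packets queued for @vsk from the send queue. If purging
 * reply packets brings queued_replies back under the rx vring size,
 * restart the rx worker, which may have throttled itself.
 */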
static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct virtio_vsock *vsock;
	int cnt = 0, ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = -ENODEV;
		goto out_rcu;
	}

	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

	if (cnt) {
		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
		    new_cnt < virtqueue_get_vring_size(rx_vq))
			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
	}

	ret = 0;

out_rcu:
	rcu_read_unlock();
	return ret;
}

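/* Refill the rx virtqueue with freshly allocated skbs until it is
 * full. Each buffer covers the packet header plus the default payload
 * size. Must be called with rx_lock held.
 */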
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
	struct scatterlist pkt, *p;
	struct virtqueue *vq;
	struct sk_buff *skb;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		skb = virtio_vsock_alloc_skb(total_len, GFP_KERNEL);
		if (!skb)
			break;

		memset(skb->head, 0, VIRTIO_VSOCK_SKB_HEADROOM);
		sg_init_one(&pkt, virtio_vsock_hdr(skb), total_len);
		p = &pkt;
		ret = virtqueue_add_sgs(vq, &p, 0, 1, skb, GFP_KERNEL);
		if (ret < 0) {
			kfree_skb(skb);
			break;
		}

		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;
	virtqueue_kick(vq);
}

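/* Worker that reclaims tx buffers the device has used. Reclaimed
 * descriptors free up space in the tx virtqueue, so reschedule the
 * send worker if anything was consumed.
 */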
static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq;
	bool added = false;

	vq = vsock->vqs[VSOCK_VQ_TX];
	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	do {
		struct sk_buff *skb;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((skb = virtqueue_get_buf(vq, &len)) != NULL) {
			consume_skb(skb);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));

out:
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < virtqueue_get_vring_size(vq);
}

/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
				       struct virtio_vsock_event *event)
{
	struct scatterlist sg;
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	sg_init_one(&sg, event, sizeof(*event));

	return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
}

/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
		struct virtio_vsock_event *event = &vsock->event_list[i];

		virtio_vsock_event_fill_one(vsock, event);
	}

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}

static void virtio_vsock_reset_sock(struct sock *sk)
{
	/* vmci_transport.c doesn't take sk_lock here either. At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	sk->sk_state = TCP_CLOSE;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}

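/* Read the guest CID from the device config space. The field is
 * little-endian 64-bit per the virtio spec, although only 32 bits of
 * CID are currently used.
 */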
static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	__le64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}

/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
				      struct virtio_vsock_event *event)
{
	switch (le32_to_cpu(event->id)) {
	case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
		virtio_vsock_update_guest_cid(vsock);
		vsock_for_each_connected_socket(&virtio_transport.transport,
						virtio_vsock_reset_sock);
		break;
	}
}

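/* Worker that drains the event virtqueue. Each event buffer is handled
 * and then immediately re-queued so the device can post more events.
 */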
static void virtio_transport_event_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, event_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	mutex_lock(&vsock->event_lock);

	if (!vsock->event_run)
		goto out;

	do {
		struct virtio_vsock_event *event;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
			if (len == sizeof(*event))
				virtio_vsock_event_handle(vsock, event);

			virtio_vsock_event_fill_one(vsock, event);
		}
	} while (!virtqueue_enable_cb(vq));

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
	mutex_unlock(&vsock->event_lock);
}

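/* Virtqueue callbacks run in atomic context, so they just kick the
 * corresponding worker. vdev->priv is cleared early during removal,
 * hence the NULL check.
 */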
static void virtio_vsock_event_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

static bool virtio_transport_can_msgzerocopy(int bufs_num)
{
	struct virtio_vsock *vsock;
	bool res = false;

	rcu_read_lock();

	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock) {
		struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];

		/* Check that the tx queue is large enough to hold all of
		 * the data to send. This is needed because, when there is
		 * not enough free space in the queue, the current skb is
		 * reinserted at the head of the socket's tx list to retry
		 * transmission later. So if the skb is bigger than the
		 * whole queue, it will be reinserted again and again,
		 * blocking other skbs from being sent. Each page of the
		 * user-provided buffer will be added as a single buffer
		 * to the tx virtqueue, so compare the number of pages
		 * against the maximum capacity of the queue.
		 */
		if (bufs_num <= vq->num_max)
			res = true;
	}

	rcu_read_unlock();

	return res;
}

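/* Zerocopy transmission is always permitted at the transport level;
 * virtio_transport_can_msgzerocopy() decides per message whether the
 * buffer actually fits the tx virtqueue.
 */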
static bool virtio_transport_msgzerocopy_allow(void)
{
	return true;
}

static bool virtio_transport_seqpacket_allow(u32 remote_cid);

static struct virtio_transport virtio_transport = {
	.transport = {
		.module = THIS_MODULE,

		.get_local_cid = virtio_transport_get_local_cid,

		.init = virtio_transport_do_socket_init,
		.destruct = virtio_transport_destruct,
		.release = virtio_transport_release,
		.connect = virtio_transport_connect,
		.shutdown = virtio_transport_shutdown,
		.cancel_pkt = virtio_transport_cancel_pkt,

		.dgram_bind = virtio_transport_dgram_bind,
		.dgram_dequeue = virtio_transport_dgram_dequeue,
		.dgram_enqueue = virtio_transport_dgram_enqueue,
		.dgram_allow = virtio_transport_dgram_allow,

		.stream_dequeue = virtio_transport_stream_dequeue,
		.stream_enqueue = virtio_transport_stream_enqueue,
		.stream_has_data = virtio_transport_stream_has_data,
		.stream_has_space = virtio_transport_stream_has_space,
		.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
		.stream_is_active = virtio_transport_stream_is_active,
		.stream_allow = virtio_transport_stream_allow,

		.seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow = virtio_transport_seqpacket_allow,
		.seqpacket_has_data = virtio_transport_seqpacket_has_data,

		.msgzerocopy_allow = virtio_transport_msgzerocopy_allow,

		.notify_poll_in = virtio_transport_notify_poll_in,
		.notify_poll_out = virtio_transport_notify_poll_out,
		.notify_recv_init = virtio_transport_notify_recv_init,
		.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init = virtio_transport_notify_send_init,
		.notify_send_pre_block = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size = virtio_transport_notify_buffer_size,
		.notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat,

		.read_skb = virtio_transport_read_skb,
	},

	.send_pkt = virtio_transport_send_pkt,
	.can_msgzerocopy = virtio_transport_can_msgzerocopy,
};

static bool virtio_transport_seqpacket_allow(u32 remote_cid)
{
	struct virtio_vsock *vsock;
	bool seqpacket_allow;

	seqpacket_allow = false;
	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;
	rcu_read_unlock();

	return seqpacket_allow;
}

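/* Worker that drains the rx virtqueue. Buffer lengths are validated
 * (at least a full header, no longer than the posted buffer) before
 * the skb is handed to the common receive path. The virtqueue is
 * refilled once fewer than half of the buffers remain posted.
 */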
static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);

	if (!vsock->rx_run)
		goto out;

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			struct sk_buff *skb;
			unsigned int len;

			if (!virtio_transport_more_replies(vsock)) {
				/* Stop rx until the device processes already
				 * pending replies. Leave rx virtqueue
				 * callbacks disabled.
				 */
				goto out;
			}

			skb = virtqueue_get_buf(vq, &len);
			if (!skb)
				break;

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
				     len > virtio_vsock_skb_len(skb))) {
				kfree_skb(skb);
				continue;
			}

			virtio_vsock_skb_rx_put(skb);
			virtio_transport_deliver_tap_pkt(skb);
			virtio_transport_recv_pkt(&virtio_transport, skb);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}

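/* Allocate the rx/tx/event virtqueues, read the guest CID from config
 * space and mark the device ready. The order of 'names' and
 * 'callbacks' must match the VSOCK_VQ_* indices (rx, tx, event).
 */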
static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	static const char * const names[] = {
		"rx",
		"tx",
		"event",
	};
	vq_callback_t *callbacks[] = {
		virtio_vsock_rx_done,
		virtio_vsock_tx_done,
		virtio_vsock_event_done,
	};
	int ret;

	ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, callbacks, names,
			      NULL);
	if (ret < 0)
		return ret;

	virtio_vsock_update_guest_cid(vsock);

	virtio_device_ready(vdev);

	return 0;
}

static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
{
	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = true;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->rx_lock);
	virtio_vsock_rx_fill(vsock);
	vsock->rx_run = true;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->event_lock);
	virtio_vsock_event_fill(vsock);
	vsock->event_run = true;
	mutex_unlock(&vsock->event_lock);

	/* virtio_transport_send_pkt() can queue packets once
	 * the_virtio_vsock is set, but they won't be processed until
	 * vsock->tx_run is set to true. We queue vsock->send_pkt_work
	 * when initialization finishes to send those packets queued
	 * earlier.
	 * We don't need to queue the other workers (rx, event) because
	 * as long as we don't fill the queues with empty buffers, the
	 * host can't send us any notification.
	 */
	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	struct sk_buff *skb;

	/* Reset all connected sockets when the VQs disappear */
	vsock_for_each_connected_socket(&virtio_transport.transport,
					virtio_vsock_reset_sock);

	/* Stop all work handlers to make sure no one is accessing the device,
	 * so we can safely call virtio_reset_device().
	 */
	mutex_lock(&vsock->rx_lock);
	vsock->rx_run = false;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = false;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->event_lock);
	vsock->event_run = false;
	mutex_unlock(&vsock->event_lock);

	/* Flush all device writes and interrupts, device will not use any
	 * more buffers.
	 */
	virtio_reset_device(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->tx_lock);

	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

	/* Delete virtqueues and flush outstanding callbacks if any */
	vdev->config->del_vqs(vdev);
}

static int virtio_vsock_probe(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = NULL;
	int ret;
	int i;

	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
	if (ret)
		return ret;

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
	if (!vsock) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->vdev = vdev;

	vsock->rx_buf_nr = 0;
	vsock->rx_buf_max_nr = 0;
	atomic_set(&vsock->queued_replies, 0);

	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	skb_queue_head_init(&vsock->send_pkt_queue);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);

	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	vdev->priv = vsock;

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	for (i = 0; i < ARRAY_SIZE(vsock->out_sgs); i++)
		vsock->out_sgs[i] = &vsock->out_bufs[i];

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;

out:
	kfree(vsock);
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}

static void virtio_vsock_remove(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	vdev->priv = NULL;
	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	/* Other works can be queued before 'config->del_vqs()', so we flush
	 * all works before freeing the vsock object to avoid use after free.
	 */
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	mutex_unlock(&the_virtio_vsock_mutex);

	kfree(vsock);
}

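/* Freeze tears the device down just like removal, but keeps the vsock
 * object around so that restore can re-initialize the virtqueues and
 * republish the_virtio_vsock.
 */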
#ifdef CONFIG_PM_SLEEP
static int virtio_vsock_freeze(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;
}

static int virtio_vsock_restore(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;
	int ret;

	mutex_lock(&the_virtio_vsock_mutex);

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

out:
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_VSOCK_F_SEQPACKET
};

static struct virtio_driver virtio_vsock_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtio_vsock_probe,
	.remove = virtio_vsock_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtio_vsock_freeze,
	.restore = virtio_vsock_restore,
#endif
};

static int __init virtio_vsock_init(void)
{
	int ret;

	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
	if (!virtio_vsock_workqueue)
		return -ENOMEM;

	ret = vsock_core_register(&virtio_transport.transport,
				  VSOCK_TRANSPORT_F_G2H);
	if (ret)
		goto out_wq;

	ret = register_virtio_driver(&virtio_vsock_driver);
	if (ret)
		goto out_vci;

	return 0;

out_vci:
	vsock_core_unregister(&virtio_transport.transport);
out_wq:
	destroy_workqueue(virtio_vsock_workqueue);
	return ret;
}

static void __exit virtio_vsock_exit(void)
{
	unregister_virtio_driver(&virtio_vsock_driver);
	vsock_core_unregister(&virtio_transport.transport);
	destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);