// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>

#include "vmci_transport_notify.h"

static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg);
static void vmci_transport_peer_detach_cb(u32 sub_id,
					  const struct vmci_event_data *ed,
					  void *client_data);
static void vmci_transport_recv_pkt_work(struct work_struct *work);
static void vmci_transport_cleanup(struct work_struct *work);
static int vmci_transport_recv_listen(struct sock *sk,
				      struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_server(
					struct sock *sk,
					struct sock *pending,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client(
					struct sock *sk,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client_negotiate(
					struct sock *sk,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client_invalid(
					struct sock *sk,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connected(struct sock *sk,
					 struct vmci_transport_packet *pkt);
static bool vmci_transport_old_proto_override(bool *old_pkt_proto);
static u16 vmci_transport_new_proto_supported_versions(void);
static bool vmci_transport_proto_to_notify_struct(struct sock *sk, u16 *proto,
						  bool old_pkt_proto);
static bool vmci_check_transport(struct vsock_sock *vsk);

struct vmci_transport_recv_pkt_info {
	struct work_struct work;
	struct sock *sk;
	struct vmci_transport_packet pkt;
};

static LIST_HEAD(vmci_transport_cleanup_list);
static DEFINE_SPINLOCK(vmci_transport_cleanup_lock);
static DECLARE_WORK(vmci_transport_cleanup_work, vmci_transport_cleanup);

static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID,
							   VMCI_INVALID_ID };
static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;

static int PROTOCOL_OVERRIDE = -1;

static struct vsock_transport vmci_transport; /* forward declaration */

/* Helper function to convert from a VMCI error code to a VSock error code. */

static s32 vmci_transport_error_to_vsock_error(s32 vmci_error)
{
	switch (vmci_error) {
	case VMCI_ERROR_NO_MEM:
		return -ENOMEM;
	case VMCI_ERROR_DUPLICATE_ENTRY:
	case VMCI_ERROR_ALREADY_EXISTS:
		return -EADDRINUSE;
	case VMCI_ERROR_NO_ACCESS:
		return -EPERM;
	case VMCI_ERROR_NO_RESOURCES:
		return -ENOBUFS;
	case VMCI_ERROR_INVALID_RESOURCE:
		return -EHOSTUNREACH;
	case VMCI_ERROR_INVALID_ARGS:
	default:
		break;
	}
	return -EINVAL;
}
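
/* Illustrative sketch (not part of the driver): callers feed the raw return
 * value of a VMCI primitive straight into the converter, e.g.
 *
 *	err = vmci_datagram_send(&pkt->dg);
 *	if (err < 0)
 *		err = vmci_transport_error_to_vsock_error(err);
 *
 * so VMCI_ERROR_NO_MEM surfaces to the socket layer as -ENOMEM and anything
 * unrecognized collapses to -EINVAL.
 */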

static u32 vmci_transport_peer_rid(u32 peer_cid)
{
	if (VMADDR_CID_HYPERVISOR == peer_cid)
		return VMCI_TRANSPORT_HYPERVISOR_PACKET_RID;

	return VMCI_TRANSPORT_PACKET_RID;
}

static inline void
vmci_transport_packet_init(struct vmci_transport_packet *pkt,
			   struct sockaddr_vm *src,
			   struct sockaddr_vm *dst,
			   u8 type,
			   u64 size,
			   u64 mode,
			   struct vmci_transport_waiting_info *wait,
			   u16 proto,
			   struct vmci_handle handle)
{
	/* We register the stream control handler as an any cid handle so we
	 * must always send from a source address of VMADDR_CID_ANY
	 */
	pkt->dg.src = vmci_make_handle(VMADDR_CID_ANY,
				       VMCI_TRANSPORT_PACKET_RID);
	pkt->dg.dst = vmci_make_handle(dst->svm_cid,
				       vmci_transport_peer_rid(dst->svm_cid));
	pkt->dg.payload_size = sizeof(*pkt) - sizeof(pkt->dg);
	pkt->version = VMCI_TRANSPORT_PACKET_VERSION;
	pkt->type = type;
	pkt->src_port = src->svm_port;
	pkt->dst_port = dst->svm_port;
	memset(&pkt->proto, 0, sizeof(pkt->proto));
	memset(&pkt->_reserved2, 0, sizeof(pkt->_reserved2));

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
		pkt->u.size = 0;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_REQUEST:
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
		pkt->u.size = size;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
	case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
		pkt->u.handle = handle;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
	case VMCI_TRANSPORT_PACKET_TYPE_READ:
	case VMCI_TRANSPORT_PACKET_TYPE_RST:
		pkt->u.size = 0;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
		pkt->u.mode = mode;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ:
	case VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE:
		memcpy(&pkt->u.wait, wait, sizeof(pkt->u.wait));
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_REQUEST2:
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
		pkt->u.size = size;
		pkt->proto = proto;
		break;
	}
}
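
/* Example (a sketch, not extra driver code): a RST control packet built by
 * this initializer needs no separate payload; the vmci_transport_packet
 * itself is the datagram. Assuming src and dst are initialized sockaddr_vm
 * values:
 *
 *	struct vmci_transport_packet pkt;
 *
 *	vmci_transport_packet_init(&pkt, &src, &dst,
 *				   VMCI_TRANSPORT_PACKET_TYPE_RST,
 *				   0, 0, NULL, VSOCK_PROTO_INVALID,
 *				   VMCI_INVALID_HANDLE);
 *	vmci_datagram_send(&pkt.dg);
 *
 * This is the shape __vmci_transport_send_control_pkt() below produces.
 */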

static inline void
vmci_transport_packet_get_addresses(struct vmci_transport_packet *pkt,
				    struct sockaddr_vm *local,
				    struct sockaddr_vm *remote)
{
	vsock_addr_init(local, pkt->dg.dst.context, pkt->dst_port);
	vsock_addr_init(remote, pkt->dg.src.context, pkt->src_port);
}

static int
__vmci_transport_send_control_pkt(struct vmci_transport_packet *pkt,
				  struct sockaddr_vm *src,
				  struct sockaddr_vm *dst,
				  enum vmci_transport_packet_type type,
				  u64 size,
				  u64 mode,
				  struct vmci_transport_waiting_info *wait,
				  u16 proto,
				  struct vmci_handle handle,
				  bool convert_error)
{
	int err;

	vmci_transport_packet_init(pkt, src, dst, type, size, mode, wait,
				   proto, handle);
	err = vmci_datagram_send(&pkt->dg);
	if (convert_error && (err < 0))
		return vmci_transport_error_to_vsock_error(err);

	return err;
}

static int
vmci_transport_reply_control_pkt_fast(struct vmci_transport_packet *pkt,
				      enum vmci_transport_packet_type type,
				      u64 size,
				      u64 mode,
				      struct vmci_transport_waiting_info *wait,
				      struct vmci_handle handle)
{
	struct vmci_transport_packet reply;
	struct sockaddr_vm src, dst;

	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST) {
		return 0;
	} else {
		vmci_transport_packet_get_addresses(pkt, &src, &dst);
		return __vmci_transport_send_control_pkt(&reply, &src, &dst,
							 type,
							 size, mode, wait,
							 VSOCK_PROTO_INVALID,
							 handle, true);
	}
}

static int
vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
				   struct sockaddr_vm *dst,
				   enum vmci_transport_packet_type type,
				   u64 size,
				   u64 mode,
				   struct vmci_transport_waiting_info *wait,
				   struct vmci_handle handle)
{
	/* Note that it is safe to use a single packet across all CPUs since
	 * two tasklets of the same type are guaranteed to not ever run
	 * simultaneously. If that ever changes, or VMCI stops using tasklets,
	 * we can use per-cpu packets.
	 */
	static struct vmci_transport_packet pkt;

	return __vmci_transport_send_control_pkt(&pkt, src, dst, type,
						 size, mode, wait,
						 VSOCK_PROTO_INVALID, handle,
						 false);
}

static int
vmci_transport_alloc_send_control_pkt(struct sockaddr_vm *src,
				      struct sockaddr_vm *dst,
				      enum vmci_transport_packet_type type,
				      u64 size,
				      u64 mode,
				      struct vmci_transport_waiting_info *wait,
				      u16 proto,
				      struct vmci_handle handle)
{
	struct vmci_transport_packet *pkt;
	int err;

	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
						mode, wait, proto, handle,
						true);
	kfree(pkt);

	return err;
}

static int
vmci_transport_send_control_pkt(struct sock *sk,
				enum vmci_transport_packet_type type,
				u64 size,
				u64 mode,
				struct vmci_transport_waiting_info *wait,
				u16 proto,
				struct vmci_handle handle)
{
	struct vsock_sock *vsk;

	vsk = vsock_sk(sk);

	if (!vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	if (!vsock_addr_bound(&vsk->remote_addr))
		return -EINVAL;

	return vmci_transport_alloc_send_control_pkt(&vsk->local_addr,
						     &vsk->remote_addr,
						     type, size, mode,
						     wait, proto, handle);
}

static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
					struct sockaddr_vm *src,
					struct vmci_transport_packet *pkt)
{
	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
		return 0;
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_RST, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_reset(struct sock *sk,
				     struct vmci_transport_packet *pkt)
{
	struct sockaddr_vm *dst_ptr;
	struct sockaddr_vm dst;
	struct vsock_sock *vsk;

	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
		return 0;

	vsk = vsock_sk(sk);

	if (!vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	if (vsock_addr_bound(&vsk->remote_addr)) {
		dst_ptr = &vsk->remote_addr;
	} else {
		vsock_addr_init(&dst, pkt->dg.src.context,
				pkt->src_port);
		dst_ptr = &dst;
	}
	return vmci_transport_alloc_send_control_pkt(&vsk->local_addr, dst_ptr,
					VMCI_TRANSPORT_PACKET_TYPE_RST,
					0, 0, NULL, VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
{
	return vmci_transport_send_control_pkt(
					sk,
					VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE,
					size, 0, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_negotiate2(struct sock *sk, size_t size,
					  u16 version)
{
	return vmci_transport_send_control_pkt(
					sk,
					VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2,
					size, 0, NULL, version,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_qp_offer(struct sock *sk,
					struct vmci_handle handle)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_OFFER, 0,
					0, NULL,
					VSOCK_PROTO_INVALID, handle);
}

static int vmci_transport_send_attach(struct sock *sk,
				      struct vmci_handle handle)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_ATTACH,
					0, 0, NULL, VSOCK_PROTO_INVALID,
					handle);
}

static int vmci_transport_reply_reset(struct vmci_transport_packet *pkt)
{
	return vmci_transport_reply_control_pkt_fast(
					pkt,
					VMCI_TRANSPORT_PACKET_TYPE_RST,
					0, 0, NULL,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_invalid_bh(struct sockaddr_vm *dst,
					  struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_INVALID,
					0, 0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_wrote_bh(struct sockaddr_vm *dst,
				 struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_read_bh(struct sockaddr_vm *dst,
				struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_wrote(struct sock *sk)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
					0, NULL, VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

int vmci_transport_send_read(struct sock *sk)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
					0, NULL, VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

int vmci_transport_send_waiting_write(struct sock *sk,
				      struct vmci_transport_waiting_info *wait)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE,
					0, 0, wait, VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

int vmci_transport_send_waiting_read(struct sock *sk,
				     struct vmci_transport_waiting_info *wait)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ,
					0, 0, wait, VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_shutdown(struct vsock_sock *vsk, int mode)
{
	return vmci_transport_send_control_pkt(
					&vsk->sk,
					VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN,
					0, mode, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_conn_request(struct sock *sk, size_t size)
{
	return vmci_transport_send_control_pkt(sk,
					VMCI_TRANSPORT_PACKET_TYPE_REQUEST,
					size, 0, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_conn_request2(struct sock *sk, size_t size,
					     u16 version)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_REQUEST2,
					size, 0, NULL, version,
					VMCI_INVALID_HANDLE);
}

static struct sock *vmci_transport_get_pending(
					struct sock *listener,
					struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vpending;
	struct sock *pending;
	struct sockaddr_vm src;

	vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);

	vlistener = vsock_sk(listener);

	list_for_each_entry(vpending, &vlistener->pending_links,
			    pending_links) {
		if (vsock_addr_equals_addr(&src, &vpending->remote_addr) &&
		    pkt->dst_port == vpending->local_addr.svm_port) {
			pending = sk_vsock(vpending);
			sock_hold(pending);
			goto found;
		}
	}

	pending = NULL;
found:
	return pending;

}

static void vmci_transport_release_pending(struct sock *pending)
{
	sock_put(pending);
}

/* We allow two kinds of sockets to communicate with a restricted VM: 1)
 * trusted sockets 2) sockets from applications running as the same user as the
 * VM (this is only true for the host side and only when using hosted products)
 */

static bool vmci_transport_is_trusted(struct vsock_sock *vsock, u32 peer_cid)
{
	return vsock->trusted ||
	       vmci_is_context_owner(peer_cid, vsock->owner->uid);
}

/* We allow sending datagrams to and receiving datagrams from a restricted VM
 * only if it is trusted as described in vmci_transport_is_trusted.
 */

static bool vmci_transport_allow_dgram(struct vsock_sock *vsock, u32 peer_cid)
{
	if (VMADDR_CID_HYPERVISOR == peer_cid)
		return true;

	if (vsock->cached_peer != peer_cid) {
		vsock->cached_peer = peer_cid;
		if (!vmci_transport_is_trusted(vsock, peer_cid) &&
		    (vmci_context_get_priv_flags(peer_cid) &
		     VMCI_PRIVILEGE_FLAG_RESTRICTED)) {
			vsock->cached_peer_allow_dgram = false;
		} else {
			vsock->cached_peer_allow_dgram = true;
		}
	}

	return vsock->cached_peer_allow_dgram;
}
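
/* Worked example of the cache above: a socket exchanging datagrams with peer
 * CID 5 pays for the privilege lookup (vmci_context_get_priv_flags()) only on
 * the first datagram; subsequent datagrams from CID 5 are answered from
 * cached_peer_allow_dgram. A datagram from CID 7 then invalidates the cache
 * and triggers exactly one new lookup.
 */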

static int
vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
				struct vmci_handle *handle,
				u64 produce_size,
				u64 consume_size,
				u32 peer, u32 flags, bool trusted)
{
	int err = 0;

	if (trusted) {
		/* Try to allocate our queue pair as trusted. This will only
		 * work if vsock is running in the host.
		 */

		err = vmci_qpair_alloc(qpair, handle, produce_size,
				       consume_size,
				       peer, flags,
				       VMCI_PRIVILEGE_FLAG_TRUSTED);
		if (err != VMCI_ERROR_NO_ACCESS)
			goto out;

	}

	err = vmci_qpair_alloc(qpair, handle, produce_size, consume_size,
			       peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
out:
	if (err < 0) {
		pr_err_once("Could not attach to queue pair with %d\n", err);
		err = vmci_transport_error_to_vsock_error(err);
	}

	return err;
}

static int
vmci_transport_datagram_create_hnd(u32 resource_id,
				   u32 flags,
				   vmci_datagram_recv_cb recv_cb,
				   void *client_data,
				   struct vmci_handle *out_handle)
{
	int err = 0;

	/* Try to allocate our datagram handler as trusted. This will only work
	 * if vsock is running in the host.
	 */

	err = vmci_datagram_create_handle_priv(resource_id, flags,
					       VMCI_PRIVILEGE_FLAG_TRUSTED,
					       recv_cb,
					       client_data, out_handle);

	if (err == VMCI_ERROR_NO_ACCESS)
		err = vmci_datagram_create_handle(resource_id, flags,
						  recv_cb, client_data,
						  out_handle);

	return err;
}
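
/* The trusted-then-unprivileged fallback above mirrors
 * vmci_transport_queue_pair_alloc(): first try VMCI_PRIVILEGE_FLAG_TRUSTED,
 * which only succeeds when vsock runs on the host, and retry without
 * privileges on VMCI_ERROR_NO_ACCESS, so a guest transparently takes the
 * second path.
 */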

/* This is invoked as part of a tasklet that's scheduled when the VMCI
 * interrupt fires. This is run in bottom-half context and if it ever needs to
 * sleep it should defer that work to a work queue.
 */

static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg)
{
	struct sock *sk;
	size_t size;
	struct sk_buff *skb;
	struct vsock_sock *vsk;

	sk = (struct sock *)data;

	/* This handler is privileged when this module is running on the host.
	 * We will get datagrams from all endpoints (even VMs that are in a
	 * restricted context). If we get one from a restricted context then
	 * the destination socket must be trusted.
	 *
	 * NOTE: We access the socket struct without holding the lock here.
	 * This is ok because the field we are interested in is never modified
	 * outside of the create and destruct socket functions.
	 */
	vsk = vsock_sk(sk);
	if (!vmci_transport_allow_dgram(vsk, dg->src.context))
		return VMCI_ERROR_NO_ACCESS;

	size = VMCI_DG_SIZE(dg);

	/* Attach the packet to the socket's receive queue as an sk_buff. */
	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		return VMCI_ERROR_NO_MEM;

	/* sk_receive_skb() will do a sock_put(), so hold here. */
	sock_hold(sk);
	skb_put(skb, size);
	memcpy(skb->data, dg, size);
	sk_receive_skb(sk, skb, 0);

	return VMCI_SUCCESS;
}
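
/* Note on the receive path above: the skb carries the complete VMCI
 * datagram, header included. A sketch of how the dequeue side would peel it
 * apart (using the usual VMCI accessors):
 *
 *	struct vmci_datagram *dg = (struct vmci_datagram *)skb->data;
 *	size_t payload_len = dg->payload_size;
 *	void *payload = VMCI_DG_PAYLOAD(dg);
 *
 * i.e. the user-visible bytes start at VMCI_DG_PAYLOAD(dg), not skb->data.
 */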

static bool vmci_transport_stream_allow(u32 cid, u32 port)
{
	static const u32 non_socket_contexts[] = {
		VMADDR_CID_LOCAL,
	};
	int i;

	BUILD_BUG_ON(sizeof(cid) != sizeof(*non_socket_contexts));

	for (i = 0; i < ARRAY_SIZE(non_socket_contexts); i++) {
		if (cid == non_socket_contexts[i])
			return false;
	}

	return true;
}

/* This is invoked as part of a tasklet that's scheduled when the VMCI
 * interrupt fires. This is run in bottom-half context but it defers most of
 * its work to the packet handling work queue.
 */

static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)
{
	struct sock *sk;
	struct sockaddr_vm dst;
	struct sockaddr_vm src;
	struct vmci_transport_packet *pkt;
	struct vsock_sock *vsk;
	bool bh_process_pkt;
	int err;

	sk = NULL;
	err = VMCI_SUCCESS;
	bh_process_pkt = false;

	/* Ignore incoming packets from contexts without sockets, or resources
	 * that aren't vsock implementations.
	 */

	if (!vmci_transport_stream_allow(dg->src.context, -1)
	    || vmci_transport_peer_rid(dg->src.context) != dg->src.resource)
		return VMCI_ERROR_NO_ACCESS;

	if (VMCI_DG_SIZE(dg) < sizeof(*pkt))
		/* Drop datagrams that do not contain full VSock packets. */
		return VMCI_ERROR_INVALID_ARGS;

	pkt = (struct vmci_transport_packet *)dg;

	/* Find the socket that should handle this packet. First we look for a
	 * connected socket and if there is none we look for a socket bound to
	 * the destination address.
	 */
	vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
	vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);

	sk = vsock_find_connected_socket(&src, &dst);
	if (!sk) {
		sk = vsock_find_bound_socket(&dst);
		if (!sk) {
			/* We could not find a socket for this specified
			 * address. If this packet is a RST, we just drop it.
			 * If it is another packet, we send a RST. Note that
			 * we do not send a RST reply to RSTs so that we do not
			 * continually send RSTs between two endpoints.
			 *
			 * Note that since this is a reply, dst is src and src
			 * is dst.
			 */
			if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
				pr_err("unable to send reset\n");

			err = VMCI_ERROR_NOT_FOUND;
			goto out;
		}
	}

	/* If the received packet type is beyond all types known to this
	 * implementation, reply with an invalid message. Hopefully this will
	 * help when implementing backwards compatibility in the future.
	 */
	if (pkt->type >= VMCI_TRANSPORT_PACKET_TYPE_MAX) {
		vmci_transport_send_invalid_bh(&dst, &src);
		err = VMCI_ERROR_INVALID_ARGS;
		goto out;
	}

	/* This handler is privileged when this module is running on the host.
	 * We will get datagram connect requests from all endpoints (even VMs
	 * that are in a restricted context). If we get one from a restricted
	 * context then the destination socket must be trusted.
	 *
	 * NOTE: We access the socket struct without holding the lock here.
	 * This is ok because the field we are interested in is never modified
	 * outside of the create and destruct socket functions.
	 */
	vsk = vsock_sk(sk);
	if (!vmci_transport_allow_dgram(vsk, pkt->dg.src.context)) {
		err = VMCI_ERROR_NO_ACCESS;
		goto out;
	}

	/* We do most everything in a work queue, but let's fast path the
	 * notification of reads and writes to help data transfer performance.
	 * We can only do this if there is no process context code executing
	 * for this socket since that may change the state.
	 */
	bh_lock_sock(sk);

	if (!sock_owned_by_user(sk)) {
		/* The local context ID may be out of date, update it. */
		vsk->local_addr.svm_cid = dst.svm_cid;

		if (sk->sk_state == TCP_ESTABLISHED)
			vmci_trans(vsk)->notify_ops->handle_notify_pkt(
					sk, pkt, true, &dst, &src,
					&bh_process_pkt);
	}

	bh_unlock_sock(sk);

	if (!bh_process_pkt) {
		struct vmci_transport_recv_pkt_info *recv_pkt_info;

		recv_pkt_info = kmalloc(sizeof(*recv_pkt_info), GFP_ATOMIC);
		if (!recv_pkt_info) {
			if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
				pr_err("unable to send reset\n");

			err = VMCI_ERROR_NO_MEM;
			goto out;
		}

		recv_pkt_info->sk = sk;
		memcpy(&recv_pkt_info->pkt, pkt, sizeof(recv_pkt_info->pkt));
		INIT_WORK(&recv_pkt_info->work, vmci_transport_recv_pkt_work);

		schedule_work(&recv_pkt_info->work);
		/* Clear sk so that the reference count incremented by one of
		 * the Find functions above is not decremented below. We need
		 * that reference count for the packet handler we've scheduled
		 * to run.
		 */
		sk = NULL;
	}

out:
	if (sk)
		sock_put(sk);

	return err;
}

static void vmci_transport_handle_detach(struct sock *sk)
{
	struct vsock_sock *vsk;

	vsk = vsock_sk(sk);
	if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
		sock_set_flag(sk, SOCK_DONE);

		/* On a detach the peer will not be sending or receiving
		 * anymore.
		 */
		vsk->peer_shutdown = SHUTDOWN_MASK;

		/* We should not be sending anymore since the peer won't be
		 * there to receive, but we can still receive if there is data
		 * left in our consume queue. If the local endpoint is a host,
		 * we can't call vsock_stream_has_data, since that may block,
		 * but a host endpoint can't read data once the VM has
		 * detached, so there is no available data in that case.
		 */
		if (vsk->local_addr.svm_cid == VMADDR_CID_HOST ||
		    vsock_stream_has_data(vsk) <= 0) {
			if (sk->sk_state == TCP_SYN_SENT) {
				/* The peer may detach from a queue pair while
				 * we are still in the connecting state, i.e.,
				 * if the peer VM is killed after attaching to
				 * a queue pair, but before we complete the
				 * handshake. In that case, we treat the detach
				 * event like a reset.
				 */

				sk->sk_state = TCP_CLOSE;
				sk->sk_err = ECONNRESET;
				sk_error_report(sk);
				return;
			}
			sk->sk_state = TCP_CLOSE;
		}
		sk->sk_state_change(sk);
	}
}

static void vmci_transport_peer_detach_cb(u32 sub_id,
					  const struct vmci_event_data *e_data,
					  void *client_data)
{
	struct vmci_transport *trans = client_data;
	const struct vmci_event_payload_qp *e_payload;

	e_payload = vmci_event_data_const_payload(e_data);

	/* XXX This is lame, we should provide a way to lookup sockets by
	 * qp_handle.
	 */
	if (vmci_handle_is_invalid(e_payload->handle) ||
	    !vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
		return;

	/* We don't ask for delayed CBs when we subscribe to this event (we
	 * pass 0 as flags to vmci_event_subscribe()). VMCI makes no
	 * guarantees in that case about what context we might be running in,
	 * so it could be BH or process, blockable or non-blockable. So we
	 * need to account for all possible contexts here.
	 */
	spin_lock_bh(&trans->lock);
	if (!trans->sk)
		goto out;

	/* Apart from here, trans->lock is only grabbed as part of sk destruct,
	 * where trans->sk isn't locked.
	 */
	bh_lock_sock(trans->sk);

	vmci_transport_handle_detach(trans->sk);

	bh_unlock_sock(trans->sk);
out:
	spin_unlock_bh(&trans->lock);
}

static void vmci_transport_qp_resumed_cb(u32 sub_id,
					 const struct vmci_event_data *e_data,
					 void *client_data)
{
	vsock_for_each_connected_socket(&vmci_transport,
					vmci_transport_handle_detach);
}

static void vmci_transport_recv_pkt_work(struct work_struct *work)
{
	struct vmci_transport_recv_pkt_info *recv_pkt_info;
	struct vmci_transport_packet *pkt;
	struct sock *sk;

	recv_pkt_info =
		container_of(work, struct vmci_transport_recv_pkt_info, work);
	sk = recv_pkt_info->sk;
	pkt = &recv_pkt_info->pkt;

	lock_sock(sk);

	/* The local context ID may be out of date. */
	vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context;

	switch (sk->sk_state) {
	case TCP_LISTEN:
		vmci_transport_recv_listen(sk, pkt);
		break;
	case TCP_SYN_SENT:
		/* Processing of pending connections for servers goes through
		 * the listening socket, so see vmci_transport_recv_listen()
		 * for that path.
		 */
		vmci_transport_recv_connecting_client(sk, pkt);
		break;
	case TCP_ESTABLISHED:
		vmci_transport_recv_connected(sk, pkt);
		break;
	default:
		/* Because this function does not run in the same context as
		 * vmci_transport_recv_stream_cb it is possible that the
		 * socket has closed. We need to let the other side know or it
		 * could be sitting in a connect and hang forever. Send a
		 * reset to prevent that.
		 */
		vmci_transport_send_reset(sk, pkt);
		break;
	}

	release_sock(sk);
	kfree(recv_pkt_info);
	/* Release reference obtained in the stream callback when we fetched
	 * this socket out of the bound or connected list.
	 */
	sock_put(sk);
}

static int vmci_transport_recv_listen(struct sock *sk,
				      struct vmci_transport_packet *pkt)
{
	struct sock *pending;
	struct vsock_sock *vpending;
	int err;
	u64 qp_size;
	bool old_request = false;
	bool old_pkt_proto = false;

	/* Because we are in the listen state, we could be receiving a packet
	 * for ourselves or any previous connection requests that we received.
	 * If it's the latter, we try to find a socket in our list of pending
	 * connections and, if we do, call the appropriate handler for the
	 * state that socket is in. Otherwise we try to service the
	 * connection request.
	 */
	pending = vmci_transport_get_pending(sk, pkt);
	if (pending) {
		lock_sock(pending);

		/* The local context ID may be out of date. */
		vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context;

		switch (pending->sk_state) {
		case TCP_SYN_SENT:
			err = vmci_transport_recv_connecting_server(sk,
								    pending,
								    pkt);
			break;
		default:
			vmci_transport_send_reset(pending, pkt);
			err = -EINVAL;
		}

		if (err < 0)
			vsock_remove_pending(sk, pending);

		release_sock(pending);
		vmci_transport_release_pending(pending);

		return err;
	}

	/* The listen state only accepts connection requests. Reply with a
	 * reset unless we received a reset.
	 */

	if (!(pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST ||
	      pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)) {
		vmci_transport_reply_reset(pkt);
		return -EINVAL;
	}

	if (pkt->u.size == 0) {
		vmci_transport_reply_reset(pkt);
		return -EINVAL;
	}

	/* If this socket can't accommodate this connection request, we send a
	 * reset. Otherwise we create and initialize a child socket and reply
	 * with a connection negotiation.
	 */
	if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog) {
		vmci_transport_reply_reset(pkt);
		return -ECONNREFUSED;
	}

	pending = vsock_create_connected(sk);
	if (!pending) {
		vmci_transport_send_reset(sk, pkt);
		return -ENOMEM;
	}

	vpending = vsock_sk(pending);

	vsock_addr_init(&vpending->local_addr, pkt->dg.dst.context,
			pkt->dst_port);
	vsock_addr_init(&vpending->remote_addr, pkt->dg.src.context,
			pkt->src_port);

	err = vsock_assign_transport(vpending, vsock_sk(sk));
	/* Transport assigned (looking at remote_addr) must be the same
	 * where we received the request.
	 */
	if (err || !vmci_check_transport(vpending)) {
		vmci_transport_send_reset(sk, pkt);
		sock_put(pending);
		return err;
	}

	/* If the proposed size fits within our min/max, accept it. Otherwise
	 * propose our own size.
	 */
	if (pkt->u.size >= vpending->buffer_min_size &&
	    pkt->u.size <= vpending->buffer_max_size) {
		qp_size = pkt->u.size;
	} else {
		qp_size = vpending->buffer_size;
	}

	/* Figure out if we are using old or new requests based on the
	 * overrides pkt types sent by our peer.
	 */
	if (vmci_transport_old_proto_override(&old_pkt_proto)) {
		old_request = old_pkt_proto;
	} else {
		if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST)
			old_request = true;
		else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)
			old_request = false;

	}

	if (old_request) {
		/* Handle a REQUEST (or override) */
		u16 version = VSOCK_PROTO_INVALID;
		if (vmci_transport_proto_to_notify_struct(
			pending, &version, true))
			err = vmci_transport_send_negotiate(pending, qp_size);
		else
			err = -EINVAL;

	} else {
		/* Handle a REQUEST2 (or override) */
		int proto_int = pkt->proto;
		int pos;
		u16 active_proto_version = 0;

		/* The list of possible protocols is the intersection of all
		 * protocols the client supports and all the protocols we
		 * support.
		 */
		proto_int &= vmci_transport_new_proto_supported_versions();

		/* We choose the highest possible protocol version and use that
		 * one.
		 */
		pos = fls(proto_int);
		if (pos) {
			active_proto_version = (1 << (pos - 1));
			if (vmci_transport_proto_to_notify_struct(
				pending, &active_proto_version, false))
				err = vmci_transport_send_negotiate2(pending,
							qp_size,
							active_proto_version);
			else
				err = -EINVAL;

		} else {
			err = -EINVAL;
		}
	}

	if (err < 0) {
		vmci_transport_send_reset(sk, pkt);
		sock_put(pending);
		err = vmci_transport_error_to_vsock_error(err);
		goto out;
	}

	vsock_add_pending(sk, pending);
	sk_acceptq_added(sk);

	pending->sk_state = TCP_SYN_SENT;
	vmci_trans(vpending)->produce_size =
		vmci_trans(vpending)->consume_size = qp_size;
	vpending->buffer_size = qp_size;

	vmci_trans(vpending)->notify_ops->process_request(pending);

	/* We might never receive another message for this socket and it's not
	 * connected to any process, so we have to ensure it gets cleaned up
	 * ourselves. Our delayed work function will take care of that. Note
	 * that we do not ever cancel this function since we have few
	 * guarantees about its state when calling cancel_delayed_work().
	 * Instead we hold a reference on the socket for that function and make
	 * it capable of handling cases where it needs to do nothing but
	 * release that reference.
	 */
	vpending->listener = sk;
	sock_hold(sk);
	sock_hold(pending);
	schedule_delayed_work(&vpending->pending_work, HZ);

out:
	return err;
}
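
/* Worked example of the REQUEST2 version selection above: if the client
 * advertises protocol versions 0b0011 (versions 1 and 2) and
 * vmci_transport_new_proto_supported_versions() also returns 0b0011, the
 * intersection is 0b0011, fls() yields 2, and the negotiated version is
 * 1 << (2 - 1) == 2, i.e. the highest bit both sides have set.
 */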

static int
vmci_transport_recv_connecting_server(struct sock *listener,
				      struct sock *pending,
				      struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vpending;
	struct vmci_handle handle;
	struct vmci_qp *qpair;
	bool is_local;
	u32 flags;
	u32 detach_sub_id;
	int err;
	int skerr;

	vpending = vsock_sk(pending);
	detach_sub_id = VMCI_INVALID_ID;

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
		if (vmci_handle_is_invalid(pkt->u.handle)) {
			vmci_transport_send_reset(pending, pkt);
			skerr = EPROTO;
			err = -EINVAL;
			goto destroy;
		}
		break;
	default:
		/* Close and cleanup the connection. */
		vmci_transport_send_reset(pending, pkt);
		skerr = EPROTO;
		err = pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST ? 0 : -EINVAL;
		goto destroy;
	}

	/* In order to complete the connection we need to attach to the offered
	 * queue pair and send an attach notification. We also subscribe to the
	 * detach event so we know when our peer goes away, and we do that
	 * before attaching so we don't miss an event. If all this succeeds,
	 * we update our state and wakeup anything waiting in accept() for a
	 * connection.
	 */

	/* We don't care about attach since we ensure the other side has
	 * attached by specifying the ATTACH_ONLY flag below.
	 */
	err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
				   vmci_transport_peer_detach_cb,
				   vmci_trans(vpending), &detach_sub_id);
	if (err < VMCI_SUCCESS) {
		vmci_transport_send_reset(pending, pkt);
		err = vmci_transport_error_to_vsock_error(err);
		skerr = -err;
		goto destroy;
	}

	vmci_trans(vpending)->detach_sub_id = detach_sub_id;

	/* Now attach to the queue pair the client created. */
	handle = pkt->u.handle;

	/* vpending->local_addr always has a context id so we do not need to
	 * worry about VMADDR_CID_ANY in this case.
	 */
	is_local =
	    vpending->remote_addr.svm_cid == vpending->local_addr.svm_cid;
	flags = VMCI_QPFLAG_ATTACH_ONLY;
	flags |= is_local ? VMCI_QPFLAG_LOCAL : 0;

	err = vmci_transport_queue_pair_alloc(
					&qpair,
					&handle,
					vmci_trans(vpending)->produce_size,
					vmci_trans(vpending)->consume_size,
					pkt->dg.src.context,
					flags,
					vmci_transport_is_trusted(
						vpending,
						vpending->remote_addr.svm_cid));
	if (err < 0) {
		vmci_transport_send_reset(pending, pkt);
		skerr = -err;
		goto destroy;
	}

	vmci_trans(vpending)->qp_handle = handle;
	vmci_trans(vpending)->qpair = qpair;

	/* When we send the attach message, we must be ready to handle incoming
	 * control messages on the newly connected socket. So we move the
	 * pending socket to the connected state before sending the attach
	 * message. Otherwise, an incoming packet triggered by the attach being
	 * received by the peer may be processed concurrently with what happens
	 * below after sending the attach message, and that incoming packet
	 * will find the listening socket instead of the (currently) pending
	 * socket. Note that enqueueing the socket increments the reference
	 * count, so even if a reset comes before the connection is accepted,
	 * the socket will be valid until it is removed from the queue.
	 *
	 * If we fail sending the attach below, we remove the socket from the
	 * connected list and move the socket to TCP_CLOSE before
	 * releasing the lock, so a pending slow path processing of an incoming
	 * packet will not see the socket in the connected state in that case.
	 */
	pending->sk_state = TCP_ESTABLISHED;

	vsock_insert_connected(vpending);

	/* Notify our peer of our attach. */
	err = vmci_transport_send_attach(pending, handle);
	if (err < 0) {
		vsock_remove_connected(vpending);
		pr_err("Could not send attach\n");
		vmci_transport_send_reset(pending, pkt);
		err = vmci_transport_error_to_vsock_error(err);
		skerr = -err;
		goto destroy;
	}

	/* We have a connection. Move the now connected socket from the
	 * listener's pending list to the accept queue so callers of accept()
	 * can find it.
	 */
	vsock_remove_pending(listener, pending);
	vsock_enqueue_accept(listener, pending);

	/* Callers of accept() will be waiting on the listening socket, not
	 * the pending socket.
	 */
	listener->sk_data_ready(listener);

	return 0;

destroy:
	pending->sk_err = skerr;
	pending->sk_state = TCP_CLOSE;
	/* As long as we drop our reference, all necessary cleanup will handle
	 * when the cleanup function drops its reference and our destruct
	 * implementation is called. Note that since the listen handler will
	 * remove pending from the pending list upon our failure, the cleanup
	 * function won't drop the additional reference, which is why we do it
	 * here.
	 */
	sock_put(pending);

	return err;
}
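
/* Server-side handshake recap (a sketch of the message flow handled above,
 * client-to-server messages on the left):
 *
 *	REQUEST/REQUEST2  ->	handled by vmci_transport_recv_listen()
 *			  <-	NEGOTIATE/NEGOTIATE2
 *	OFFER (qp handle) ->	this function: subscribe to detach, attach
 *				to the queue pair, TCP_ESTABLISHED, enqueue
 *				for accept()
 *			  <-	ATTACH
 */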

static int
vmci_transport_recv_connecting_client(struct sock *sk,
				      struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vsk;
	int err;
	int skerr;

	vsk = vsock_sk(sk);

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
		if (vmci_handle_is_invalid(pkt->u.handle) ||
		    !vmci_handle_is_equal(pkt->u.handle,
					  vmci_trans(vsk)->qp_handle)) {
			skerr = EPROTO;
			err = -EINVAL;
			goto destroy;
		}

		/* Signify the socket is connected and wakeup the waiter in
		 * connect(). Also place the socket in the connected table for
		 * accounting (it can already be found since it's in the bound
		 * table).
		 */
		sk->sk_state = TCP_ESTABLISHED;
		sk->sk_socket->state = SS_CONNECTED;
		vsock_insert_connected(vsk);
		sk->sk_state_change(sk);

		break;
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
		if (pkt->u.size == 0
		    || pkt->dg.src.context != vsk->remote_addr.svm_cid
		    || pkt->src_port != vsk->remote_addr.svm_port
		    || !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)
		    || vmci_trans(vsk)->qpair
		    || vmci_trans(vsk)->produce_size != 0
		    || vmci_trans(vsk)->consume_size != 0
		    || vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
			skerr = EPROTO;
			err = -EINVAL;

			goto destroy;
		}

		err = vmci_transport_recv_connecting_client_negotiate(sk, pkt);
		if (err) {
			skerr = -err;
			goto destroy;
		}

		break;
	case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
		err = vmci_transport_recv_connecting_client_invalid(sk, pkt);
		if (err) {
			skerr = -err;
			goto destroy;
		}

		break;
	case VMCI_TRANSPORT_PACKET_TYPE_RST:
		/* Older versions of the Linux code (WS 6.5 / ESX 4.0) used to
		 * continue processing here after they sent an INVALID packet.
		 * This meant that we got a RST after the INVALID. We ignore a
		 * RST after an INVALID. The common code doesn't send the RST
		 * ... so we can hang if an old version of the common code
		 * fails between getting a REQUEST and sending an OFFER back.
		 * Not much we can do about it... except hope that it doesn't
		 * happen.
		 */
		if (vsk->ignore_connecting_rst) {
			vsk->ignore_connecting_rst = false;
		} else {
			skerr = ECONNRESET;
			err = 0;
			goto destroy;
		}

		break;
	default:
		/* Close and cleanup the connection. */
		skerr = EPROTO;
		err = -EINVAL;
		goto destroy;
	}

	return 0;

destroy:
	vmci_transport_send_reset(sk, pkt);

	sk->sk_state = TCP_CLOSE;
	sk->sk_err = skerr;
	sk_error_report(sk);
	return err;
}

static int vmci_transport_recv_connecting_client_negotiate(
					struct sock *sk,
					struct vmci_transport_packet *pkt)
{
	int err;
	struct vsock_sock *vsk;
	struct vmci_handle handle;
	struct vmci_qp *qpair;
	u32 detach_sub_id;
	bool is_local;
	u32 flags;
	bool old_proto = true;
	bool old_pkt_proto;
	u16 version;

	vsk = vsock_sk(sk);
	handle = VMCI_INVALID_HANDLE;
	detach_sub_id = VMCI_INVALID_ID;

	/* If we have gotten here then we should be past the point where old
	 * Linux vsock could have sent the bogus rst.
	 */
	vsk->sent_request = false;
	vsk->ignore_connecting_rst = false;

	/* Verify that we're OK with the proposed queue pair size */
	if (pkt->u.size < vsk->buffer_min_size ||
	    pkt->u.size > vsk->buffer_max_size) {
		err = -EINVAL;
		goto destroy;
	}

	/* At this point we know the CID the peer is using to talk to us. */

	if (vsk->local_addr.svm_cid == VMADDR_CID_ANY)
		vsk->local_addr.svm_cid = pkt->dg.dst.context;

	/* Setup the notify ops to be the highest supported version that both
	 * the server and the client support.
	 */

	if (vmci_transport_old_proto_override(&old_pkt_proto)) {
		old_proto = old_pkt_proto;
	} else {
		if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE)
			old_proto = true;
		else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2)
			old_proto = false;

	}

	if (old_proto)
		version = VSOCK_PROTO_INVALID;
	else
		version = pkt->proto;

	if (!vmci_transport_proto_to_notify_struct(sk, &version, old_proto)) {
		err = -EINVAL;
		goto destroy;
	}

	/* Subscribe to detach events first.
	 *
	 * XXX We attach once for each queue pair created for now so it is easy
	 * to find the socket (it's provided), but later we should only
	 * subscribe once and add a way to lookup sockets by queue pair handle.
	 */
	err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
				   vmci_transport_peer_detach_cb,
				   vmci_trans(vsk), &detach_sub_id);
	if (err < VMCI_SUCCESS) {
		err = vmci_transport_error_to_vsock_error(err);
		goto destroy;
	}

	/* Make VMCI select the handle for us. */
	handle = VMCI_INVALID_HANDLE;
	is_local = vsk->remote_addr.svm_cid == vsk->local_addr.svm_cid;
	flags = is_local ? VMCI_QPFLAG_LOCAL : 0;

	err = vmci_transport_queue_pair_alloc(&qpair,
					      &handle,
					      pkt->u.size,
					      pkt->u.size,
					      vsk->remote_addr.svm_cid,
					      flags,
					      vmci_transport_is_trusted(
						  vsk,
						  vsk->remote_addr.svm_cid));
	if (err < 0)
		goto destroy;

	err = vmci_transport_send_qp_offer(sk, handle);
	if (err < 0) {
		err = vmci_transport_error_to_vsock_error(err);
		goto destroy;
	}

	vmci_trans(vsk)->qp_handle = handle;
	vmci_trans(vsk)->qpair = qpair;

	vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
		pkt->u.size;

	vmci_trans(vsk)->detach_sub_id = detach_sub_id;

	vmci_trans(vsk)->notify_ops->process_negotiate(sk);

	return 0;

destroy:
	if (detach_sub_id != VMCI_INVALID_ID)
		vmci_event_unsubscribe(detach_sub_id);

	if (!vmci_handle_is_invalid(handle))
		vmci_qpair_detach(&qpair);

	return err;
}
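
/* Client-side counterpart of the recap above: once NEGOTIATE/NEGOTIATE2 is
 * validated, it is the client that creates the queue pair, hands its handle
 * to the peer in an OFFER, and then waits for the peer's ATTACH, which
 * vmci_transport_recv_connecting_client() turns into TCP_ESTABLISHED.
 */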

static int
vmci_transport_recv_connecting_client_invalid(struct sock *sk,
					      struct vmci_transport_packet *pkt)
{
	int err = 0;
	struct vsock_sock *vsk = vsock_sk(sk);

	if (vsk->sent_request) {
		vsk->sent_request = false;
		vsk->ignore_connecting_rst = true;

		err = vmci_transport_send_conn_request(sk, vsk->buffer_size);
		if (err < 0)
			err = vmci_transport_error_to_vsock_error(err);
		else
			err = 0;

	}

	return err;
}

static int vmci_transport_recv_connected(struct sock *sk,
					 struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vsk;
	bool pkt_processed = false;

	/* In cases where we are closing the connection, it's sufficient to
	 * mark the state change (and maybe error) and wake up any waiting
	 * threads. Since this is a connected socket, it's owned by a user
	 * process and will be cleaned up when the failure is passed back on
	 * the current or next system call. Our system call implementations
	 * must therefore check for error and state changes on entry and when
	 * being awoken.
	 */
	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
		if (pkt->u.mode) {
			vsk = vsock_sk(sk);

			vsk->peer_shutdown |= pkt->u.mode;
			sk->sk_state_change(sk);
		}
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_RST:
		vsk = vsock_sk(sk);
		/* It is possible that we sent our peer a message (e.g. a
		 * WAITING_READ) right before we got notified that the peer had
		 * detached. If that happens then we can get a RST pkt back
		 * from our peer even though there is data available for us to
		 * read. In that case, don't shutdown the socket completely but
		 * instead allow the local client to finish reading data off
		 * the queuepair. Always treat a RST pkt in connected mode like
		 * a clean shutdown.
		 */
		sock_set_flag(sk, SOCK_DONE);
		vsk->peer_shutdown = SHUTDOWN_MASK;
		if (vsock_stream_has_data(vsk) <= 0)
			sk->sk_state = TCP_CLOSING;

		sk->sk_state_change(sk);
		break;

	default:
		vsk = vsock_sk(sk);
		vmci_trans(vsk)->notify_ops->handle_notify_pkt(
				sk, pkt, false, NULL, NULL,
				&pkt_processed);
		if (!pkt_processed)
			return -EINVAL;

		break;
	}

	return 0;
}

static int vmci_transport_socket_init(struct vsock_sock *vsk,
				      struct vsock_sock *psk)
{
	vsk->trans = kmalloc(sizeof(struct vmci_transport), GFP_KERNEL);
	if (!vsk->trans)
		return -ENOMEM;

	vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
	vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
	vmci_trans(vsk)->qpair = NULL;
	vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0;
	vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
	vmci_trans(vsk)->notify_ops = NULL;
	INIT_LIST_HEAD(&vmci_trans(vsk)->elem);
	vmci_trans(vsk)->sk = &vsk->sk;
	spin_lock_init(&vmci_trans(vsk)->lock);

	return 0;
}

static void vmci_transport_free_resources(struct list_head *transport_list)
{
	while (!list_empty(transport_list)) {
		struct vmci_transport *transport =
		    list_first_entry(transport_list, struct vmci_transport,
				     elem);
		list_del(&transport->elem);

		if (transport->detach_sub_id != VMCI_INVALID_ID) {
			vmci_event_unsubscribe(transport->detach_sub_id);
			transport->detach_sub_id = VMCI_INVALID_ID;
		}

		if (!vmci_handle_is_invalid(transport->qp_handle)) {
			vmci_qpair_detach(&transport->qpair);
			transport->qp_handle = VMCI_INVALID_HANDLE;
			transport->produce_size = 0;
			transport->consume_size = 0;
		}

		kfree(transport);
	}
}

static void vmci_transport_cleanup(struct work_struct *work)
{
	LIST_HEAD(pending);

	spin_lock_bh(&vmci_transport_cleanup_lock);
	list_replace_init(&vmci_transport_cleanup_list, &pending);
	spin_unlock_bh(&vmci_transport_cleanup_lock);
	vmci_transport_free_resources(&pending);
}
1628 | |
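/* Detach the transport from a socket that is being destroyed. The sk
 * back-pointer is cleared under the transport lock so a concurrent
 * peer-detach callback cannot touch the socket, and the actual resource
 * teardown is deferred to the cleanup work.
 */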
1629 | static void vmci_transport_destruct(struct vsock_sock *vsk) |
1630 | { |
1631 | /* transport can be NULL if we hit a failure at init() time */ |
1632 | if (!vmci_trans(vsk)) |
1633 | return; |
1634 | |
1635 | /* Ensure that the detach callback doesn't use the sk/vsk |
1636 | * we are about to destruct. |
1637 | */ |
	spin_lock_bh(&vmci_trans(vsk)->lock);
	vmci_trans(vsk)->sk = NULL;
	spin_unlock_bh(&vmci_trans(vsk)->lock);
1641 | |
1642 | if (vmci_trans(vsk)->notify_ops) |
1643 | vmci_trans(vsk)->notify_ops->socket_destruct(vsk); |
1644 | |
	spin_lock_bh(&vmci_transport_cleanup_lock);
	list_add(&vmci_trans(vsk)->elem, &vmci_transport_cleanup_list);
	spin_unlock_bh(&vmci_transport_cleanup_lock);
	schedule_work(&vmci_transport_cleanup_work);
1649 | |
1650 | vsk->trans = NULL; |
1651 | } |
1652 | |
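/* Remove the socket from the vsock tables and destroy its datagram handle,
 * if one was created by a previous bind.
 */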
1653 | static void vmci_transport_release(struct vsock_sock *vsk) |
1654 | { |
1655 | vsock_remove_sock(vsk); |
1656 | |
1657 | if (!vmci_handle_is_invalid(vmci_trans(vsk)->dg_handle)) { |
1658 | vmci_datagram_destroy_handle(vmci_trans(vsk)->dg_handle); |
1659 | vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE; |
1660 | } |
1661 | } |
1662 | |
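/* Bind a datagram socket. The VMCI resource ID of the created datagram
 * handle doubles as the local port; binding to a reserved port requires
 * CAP_NET_BIND_SERVICE.
 */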
1663 | static int vmci_transport_dgram_bind(struct vsock_sock *vsk, |
1664 | struct sockaddr_vm *addr) |
1665 | { |
1666 | u32 port; |
1667 | u32 flags; |
1668 | int err; |
1669 | |
1670 | /* VMCI will select a resource ID for us if we provide |
1671 | * VMCI_INVALID_ID. |
1672 | */ |
1673 | port = addr->svm_port == VMADDR_PORT_ANY ? |
1674 | VMCI_INVALID_ID : addr->svm_port; |
1675 | |
	if (port <= LAST_RESERVED_PORT && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;
1678 | |
1679 | flags = addr->svm_cid == VMADDR_CID_ANY ? |
1680 | VMCI_FLAG_ANYCID_DG_HND : 0; |
1681 | |
	err = vmci_transport_datagram_create_hnd(port, flags,
						 vmci_transport_recv_dgram_cb,
						 &vsk->sk,
						 &vmci_trans(vsk)->dg_handle);
	if (err < VMCI_SUCCESS)
		return vmci_transport_error_to_vsock_error(err);
	vsock_addr_init(&vsk->local_addr, addr->svm_cid,
			vmci_trans(vsk)->dg_handle.resource);
1690 | |
1691 | return 0; |
1692 | } |
1693 | |
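/* Send a datagram by prepending a struct vmci_datagram header to the user's
 * payload. On success the return value is the number of payload bytes sent,
 * i.e. the VMCI result minus the header size.
 */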
1694 | static int vmci_transport_dgram_enqueue( |
1695 | struct vsock_sock *vsk, |
1696 | struct sockaddr_vm *remote_addr, |
1697 | struct msghdr *msg, |
1698 | size_t len) |
1699 | { |
1700 | int err; |
1701 | struct vmci_datagram *dg; |
1702 | |
1703 | if (len > VMCI_MAX_DG_PAYLOAD_SIZE) |
1704 | return -EMSGSIZE; |
1705 | |
	if (!vmci_transport_allow_dgram(vsk, remote_addr->svm_cid))
1707 | return -EPERM; |
1708 | |
1709 | /* Allocate a buffer for the user's message and our packet header. */ |
	dg = kmalloc(len + sizeof(*dg), GFP_KERNEL);
1711 | if (!dg) |
1712 | return -ENOMEM; |
1713 | |
1714 | err = memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len); |
1715 | if (err) { |
		kfree(dg);
1717 | return err; |
1718 | } |
1719 | |
1720 | dg->dst = vmci_make_handle(remote_addr->svm_cid, |
1721 | remote_addr->svm_port); |
1722 | dg->src = vmci_make_handle(vsk->local_addr.svm_cid, |
1723 | vsk->local_addr.svm_port); |
1724 | dg->payload_size = len; |
1725 | |
	err = vmci_datagram_send(dg);
	kfree(dg);
	if (err < 0)
		return vmci_transport_error_to_vsock_error(err);
1730 | |
1731 | return err - sizeof(*dg); |
1732 | } |
1733 | |
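/* Receive a single datagram. The payload size claimed in the VMCI header is
 * validated against the skb length, the payload is copied (and possibly
 * truncated) into the user's iovec, and the sender's address is reported via
 * msg_name when requested.
 */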
1734 | static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk, |
1735 | struct msghdr *msg, size_t len, |
1736 | int flags) |
1737 | { |
1738 | int err; |
1739 | struct vmci_datagram *dg; |
1740 | size_t payload_len; |
1741 | struct sk_buff *skb; |
1742 | |
1743 | if (flags & MSG_OOB || flags & MSG_ERRQUEUE) |
1744 | return -EOPNOTSUPP; |
1745 | |
1746 | /* Retrieve the head sk_buff from the socket's receive queue. */ |
1747 | err = 0; |
	skb = skb_recv_datagram(&vsk->sk, flags, &err);
1749 | if (!skb) |
1750 | return err; |
1751 | |
1752 | dg = (struct vmci_datagram *)skb->data; |
1753 | if (!dg) |
1754 | /* err is 0, meaning we read zero bytes. */ |
1755 | goto out; |
1756 | |
1757 | payload_len = dg->payload_size; |
1758 | /* Ensure the sk_buff matches the payload size claimed in the packet. */ |
1759 | if (payload_len != skb->len - sizeof(*dg)) { |
1760 | err = -EINVAL; |
1761 | goto out; |
1762 | } |
1763 | |
1764 | if (payload_len > len) { |
1765 | payload_len = len; |
1766 | msg->msg_flags |= MSG_TRUNC; |
1767 | } |
1768 | |
1769 | /* Place the datagram payload in the user's iovec. */ |
	err = skb_copy_datagram_msg(skb, sizeof(*dg), msg, payload_len);
1771 | if (err) |
1772 | goto out; |
1773 | |
1774 | if (msg->msg_name) { |
1775 | /* Provide the address of the sender. */ |
1776 | DECLARE_SOCKADDR(struct sockaddr_vm *, vm_addr, msg->msg_name); |
		vsock_addr_init(vm_addr, dg->src.context, dg->src.resource);
1778 | msg->msg_namelen = sizeof(*vm_addr); |
1779 | } |
1780 | err = payload_len; |
1781 | |
1782 | out: |
	skb_free_datagram(&vsk->sk, skb);
1784 | return err; |
1785 | } |
1786 | |
1787 | static bool vmci_transport_dgram_allow(u32 cid, u32 port) |
1788 | { |
1789 | if (cid == VMADDR_CID_HYPERVISOR) { |
1790 | /* Registrations of PBRPC Servers do not modify VMX/Hypervisor |
1791 | * state and are allowed. |
1792 | */ |
1793 | return port == VMCI_UNITY_PBRPC_REGISTER; |
1794 | } |
1795 | |
1796 | return true; |
1797 | } |
1798 | |
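/* Initiate a connection by sending a connection request to the peer. When
 * the protocol override forces the legacy protocol, the old-style request is
 * sent; otherwise the request advertises every supported notify-protocol
 * version.
 */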
1799 | static int vmci_transport_connect(struct vsock_sock *vsk) |
1800 | { |
1801 | int err; |
1802 | bool old_pkt_proto = false; |
1803 | struct sock *sk = &vsk->sk; |
1804 | |
	if (vmci_transport_old_proto_override(&old_pkt_proto) &&
	    old_pkt_proto) {
		err = vmci_transport_send_conn_request(sk, vsk->buffer_size);
1808 | if (err < 0) { |
1809 | sk->sk_state = TCP_CLOSE; |
1810 | return err; |
1811 | } |
1812 | } else { |
1813 | int supported_proto_versions = |
1814 | vmci_transport_new_proto_supported_versions(); |
		err = vmci_transport_send_conn_request2(sk, vsk->buffer_size,
							supported_proto_versions);
1817 | if (err < 0) { |
1818 | sk->sk_state = TCP_CLOSE; |
1819 | return err; |
1820 | } |
1821 | |
1822 | vsk->sent_request = true; |
1823 | } |
1824 | |
1825 | return err; |
1826 | } |
1827 | |
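/* Stream dequeue: peek at (MSG_PEEK) or consume data from the consume side
 * of the queue pair, copying it into the user's iovec.
 */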
1828 | static ssize_t vmci_transport_stream_dequeue( |
1829 | struct vsock_sock *vsk, |
1830 | struct msghdr *msg, |
1831 | size_t len, |
1832 | int flags) |
1833 | { |
1834 | ssize_t err; |
1835 | |
1836 | if (flags & MSG_PEEK) |
		err = vmci_qpair_peekv(vmci_trans(vsk)->qpair, msg, len, 0);
	else
		err = vmci_qpair_dequev(vmci_trans(vsk)->qpair, msg, len, 0);
1840 | |
1841 | if (err < 0) |
1842 | err = -ENOMEM; |
1843 | |
1844 | return err; |
1845 | } |
1846 | |
1847 | static ssize_t vmci_transport_stream_enqueue( |
1848 | struct vsock_sock *vsk, |
1849 | struct msghdr *msg, |
1850 | size_t len) |
1851 | { |
1852 | ssize_t err; |
1853 | |
	err = vmci_qpair_enquev(vmci_trans(vsk)->qpair, msg, len, 0);
1855 | if (err < 0) |
1856 | err = -ENOMEM; |
1857 | |
1858 | return err; |
1859 | } |
1860 | |
1861 | static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk) |
1862 | { |
1863 | return vmci_qpair_consume_buf_ready(vmci_trans(vsk)->qpair); |
1864 | } |
1865 | |
1866 | static s64 vmci_transport_stream_has_space(struct vsock_sock *vsk) |
1867 | { |
1868 | return vmci_qpair_produce_free_space(vmci_trans(vsk)->qpair); |
1869 | } |
1870 | |
1871 | static u64 vmci_transport_stream_rcvhiwat(struct vsock_sock *vsk) |
1872 | { |
1873 | return vmci_trans(vsk)->consume_size; |
1874 | } |
1875 | |
1876 | static bool vmci_transport_stream_is_active(struct vsock_sock *vsk) |
1877 | { |
1878 | return !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle); |
1879 | } |
1880 | |
1881 | static int vmci_transport_notify_poll_in( |
1882 | struct vsock_sock *vsk, |
1883 | size_t target, |
1884 | bool *data_ready_now) |
1885 | { |
1886 | return vmci_trans(vsk)->notify_ops->poll_in( |
1887 | &vsk->sk, target, data_ready_now); |
1888 | } |
1889 | |
1890 | static int vmci_transport_notify_poll_out( |
1891 | struct vsock_sock *vsk, |
1892 | size_t target, |
1893 | bool *space_available_now) |
1894 | { |
1895 | return vmci_trans(vsk)->notify_ops->poll_out( |
1896 | &vsk->sk, target, space_available_now); |
1897 | } |
1898 | |
1899 | static int vmci_transport_notify_recv_init( |
1900 | struct vsock_sock *vsk, |
1901 | size_t target, |
1902 | struct vsock_transport_recv_notify_data *data) |
1903 | { |
1904 | return vmci_trans(vsk)->notify_ops->recv_init( |
1905 | &vsk->sk, target, |
1906 | (struct vmci_transport_recv_notify_data *)data); |
1907 | } |
1908 | |
1909 | static int vmci_transport_notify_recv_pre_block( |
1910 | struct vsock_sock *vsk, |
1911 | size_t target, |
1912 | struct vsock_transport_recv_notify_data *data) |
1913 | { |
1914 | return vmci_trans(vsk)->notify_ops->recv_pre_block( |
1915 | &vsk->sk, target, |
1916 | (struct vmci_transport_recv_notify_data *)data); |
1917 | } |
1918 | |
1919 | static int vmci_transport_notify_recv_pre_dequeue( |
1920 | struct vsock_sock *vsk, |
1921 | size_t target, |
1922 | struct vsock_transport_recv_notify_data *data) |
1923 | { |
1924 | return vmci_trans(vsk)->notify_ops->recv_pre_dequeue( |
1925 | &vsk->sk, target, |
1926 | (struct vmci_transport_recv_notify_data *)data); |
1927 | } |
1928 | |
1929 | static int vmci_transport_notify_recv_post_dequeue( |
1930 | struct vsock_sock *vsk, |
1931 | size_t target, |
1932 | ssize_t copied, |
1933 | bool data_read, |
1934 | struct vsock_transport_recv_notify_data *data) |
1935 | { |
1936 | return vmci_trans(vsk)->notify_ops->recv_post_dequeue( |
1937 | &vsk->sk, target, copied, data_read, |
1938 | (struct vmci_transport_recv_notify_data *)data); |
1939 | } |
1940 | |
1941 | static int vmci_transport_notify_send_init( |
1942 | struct vsock_sock *vsk, |
1943 | struct vsock_transport_send_notify_data *data) |
1944 | { |
1945 | return vmci_trans(vsk)->notify_ops->send_init( |
1946 | &vsk->sk, |
1947 | (struct vmci_transport_send_notify_data *)data); |
1948 | } |
1949 | |
1950 | static int vmci_transport_notify_send_pre_block( |
1951 | struct vsock_sock *vsk, |
1952 | struct vsock_transport_send_notify_data *data) |
1953 | { |
1954 | return vmci_trans(vsk)->notify_ops->send_pre_block( |
1955 | &vsk->sk, |
1956 | (struct vmci_transport_send_notify_data *)data); |
1957 | } |
1958 | |
1959 | static int vmci_transport_notify_send_pre_enqueue( |
1960 | struct vsock_sock *vsk, |
1961 | struct vsock_transport_send_notify_data *data) |
1962 | { |
1963 | return vmci_trans(vsk)->notify_ops->send_pre_enqueue( |
1964 | &vsk->sk, |
1965 | (struct vmci_transport_send_notify_data *)data); |
1966 | } |
1967 | |
1968 | static int vmci_transport_notify_send_post_enqueue( |
1969 | struct vsock_sock *vsk, |
1970 | ssize_t written, |
1971 | struct vsock_transport_send_notify_data *data) |
1972 | { |
1973 | return vmci_trans(vsk)->notify_ops->send_post_enqueue( |
1974 | &vsk->sk, written, |
1975 | (struct vmci_transport_send_notify_data *)data); |
1976 | } |
1977 | |
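/* Report whether the PROTOCOL_OVERRIDE knob is set. When it is,
 * *old_pkt_proto indicates whether the legacy (pre-notify) protocol was
 * forced.
 */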
1978 | static bool vmci_transport_old_proto_override(bool *old_pkt_proto) |
1979 | { |
1980 | if (PROTOCOL_OVERRIDE != -1) { |
1981 | if (PROTOCOL_OVERRIDE == 0) |
1982 | *old_pkt_proto = true; |
1983 | else |
1984 | *old_pkt_proto = false; |
1985 | |
1986 | pr_info("Proto override in use\n" ); |
1987 | return true; |
1988 | } |
1989 | |
1990 | return false; |
1991 | } |
1992 | |
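/* Select the notification ops matching the negotiated protocol: the original
 * pkt ops for the legacy protocol, the queue-pair-state ops for
 * VSOCK_PROTO_PKT_ON_NOTIFY. Runs the chosen ops' per-socket init on success.
 */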
1993 | static bool vmci_transport_proto_to_notify_struct(struct sock *sk, |
1994 | u16 *proto, |
1995 | bool old_pkt_proto) |
1996 | { |
1997 | struct vsock_sock *vsk = vsock_sk(sk); |
1998 | |
1999 | if (old_pkt_proto) { |
2000 | if (*proto != VSOCK_PROTO_INVALID) { |
2001 | pr_err("Can't set both an old and new protocol\n" ); |
2002 | return false; |
2003 | } |
2004 | vmci_trans(vsk)->notify_ops = &vmci_transport_notify_pkt_ops; |
2005 | goto exit; |
2006 | } |
2007 | |
2008 | switch (*proto) { |
2009 | case VSOCK_PROTO_PKT_ON_NOTIFY: |
2010 | vmci_trans(vsk)->notify_ops = |
2011 | &vmci_transport_notify_pkt_q_state_ops; |
2012 | break; |
2013 | default: |
2014 | pr_err("Unknown notify protocol version\n" ); |
2015 | return false; |
2016 | } |
2017 | |
2018 | exit: |
2019 | vmci_trans(vsk)->notify_ops->socket_init(sk); |
2020 | return true; |
2021 | } |
2022 | |
2023 | static u16 vmci_transport_new_proto_supported_versions(void) |
2024 | { |
2025 | if (PROTOCOL_OVERRIDE != -1) |
2026 | return PROTOCOL_OVERRIDE; |
2027 | |
2028 | return VSOCK_PROTO_ALL_SUPPORTED; |
2029 | } |
2030 | |
2031 | static u32 vmci_transport_get_local_cid(void) |
2032 | { |
2033 | return vmci_get_context_id(); |
2034 | } |
2035 | |
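/* Transport callbacks registered with the vsock core. */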
2036 | static struct vsock_transport vmci_transport = { |
2037 | .module = THIS_MODULE, |
2038 | .init = vmci_transport_socket_init, |
2039 | .destruct = vmci_transport_destruct, |
2040 | .release = vmci_transport_release, |
2041 | .connect = vmci_transport_connect, |
2042 | .dgram_bind = vmci_transport_dgram_bind, |
2043 | .dgram_dequeue = vmci_transport_dgram_dequeue, |
2044 | .dgram_enqueue = vmci_transport_dgram_enqueue, |
2045 | .dgram_allow = vmci_transport_dgram_allow, |
2046 | .stream_dequeue = vmci_transport_stream_dequeue, |
2047 | .stream_enqueue = vmci_transport_stream_enqueue, |
2048 | .stream_has_data = vmci_transport_stream_has_data, |
2049 | .stream_has_space = vmci_transport_stream_has_space, |
2050 | .stream_rcvhiwat = vmci_transport_stream_rcvhiwat, |
2051 | .stream_is_active = vmci_transport_stream_is_active, |
2052 | .stream_allow = vmci_transport_stream_allow, |
2053 | .notify_poll_in = vmci_transport_notify_poll_in, |
2054 | .notify_poll_out = vmci_transport_notify_poll_out, |
2055 | .notify_recv_init = vmci_transport_notify_recv_init, |
2056 | .notify_recv_pre_block = vmci_transport_notify_recv_pre_block, |
2057 | .notify_recv_pre_dequeue = vmci_transport_notify_recv_pre_dequeue, |
2058 | .notify_recv_post_dequeue = vmci_transport_notify_recv_post_dequeue, |
2059 | .notify_send_init = vmci_transport_notify_send_init, |
2060 | .notify_send_pre_block = vmci_transport_notify_send_pre_block, |
2061 | .notify_send_pre_enqueue = vmci_transport_notify_send_pre_enqueue, |
2062 | .notify_send_post_enqueue = vmci_transport_notify_send_post_enqueue, |
2063 | .shutdown = vmci_transport_shutdown, |
2064 | .get_local_cid = vmci_transport_get_local_cid, |
2065 | }; |
2066 | |
2067 | static bool vmci_check_transport(struct vsock_sock *vsk) |
2068 | { |
2069 | return vsk->transport == &vmci_transport; |
2070 | } |
2071 | |
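/* Invoked by the VMCI driver when host or guest mode becomes active;
 * register the transport for the corresponding direction (host-to-guest or
 * guest-to-host).
 */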
2072 | static void vmci_vsock_transport_cb(bool is_host) |
2073 | { |
2074 | int features; |
2075 | |
2076 | if (is_host) |
2077 | features = VSOCK_TRANSPORT_F_H2G; |
2078 | else |
2079 | features = VSOCK_TRANSPORT_F_G2H; |
2080 | |
	vsock_core_register(&vmci_transport, features);
2082 | } |
2083 | |
2084 | static int __init vmci_transport_init(void) |
2085 | { |
2086 | int err; |
2087 | |
2088 | /* Create the datagram handle that we will use to send and receive all |
2089 | * VSocket control messages for this context. |
2090 | */ |
	err = vmci_transport_datagram_create_hnd(VMCI_TRANSPORT_PACKET_RID,
						 VMCI_FLAG_ANYCID_DG_HND,
						 vmci_transport_recv_stream_cb,
						 NULL,
						 &vmci_transport_stream_handle);
	if (err < VMCI_SUCCESS) {
		pr_err("Unable to create datagram handle. (%d)\n", err);
		return vmci_transport_error_to_vsock_error(err);
2099 | } |
	err = vmci_event_subscribe(VMCI_EVENT_QP_RESUMED,
				   vmci_transport_qp_resumed_cb,
				   NULL, &vmci_transport_qp_resumed_sub_id);
	if (err < VMCI_SUCCESS) {
		pr_err("Unable to subscribe to resumed event. (%d)\n", err);
		err = vmci_transport_error_to_vsock_error(err);
2106 | vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID; |
2107 | goto err_destroy_stream_handle; |
2108 | } |
2109 | |
2110 | /* Register only with dgram feature, other features (H2G, G2H) will be |
2111 | * registered when the first host or guest becomes active. |
2112 | */ |
	err = vsock_core_register(&vmci_transport, VSOCK_TRANSPORT_F_DGRAM);
2114 | if (err < 0) |
2115 | goto err_unsubscribe; |
2116 | |
	err = vmci_register_vsock_callback(vmci_vsock_transport_cb);
2118 | if (err < 0) |
2119 | goto err_unregister; |
2120 | |
2121 | return 0; |
2122 | |
2123 | err_unregister: |
	vsock_core_unregister(&vmci_transport);
2125 | err_unsubscribe: |
	vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
2127 | err_destroy_stream_handle: |
	vmci_datagram_destroy_handle(vmci_transport_stream_handle);
2129 | return err; |
2130 | } |
2131 | module_init(vmci_transport_init); |
2132 | |
2133 | static void __exit vmci_transport_exit(void) |
2134 | { |
	cancel_work_sync(&vmci_transport_cleanup_work);
	vmci_transport_free_resources(&vmci_transport_cleanup_list);
2137 | |
	if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) {
		if (vmci_datagram_destroy_handle(
			vmci_transport_stream_handle) != VMCI_SUCCESS)
			pr_err("Couldn't destroy datagram handle\n");
2142 | vmci_transport_stream_handle = VMCI_INVALID_HANDLE; |
2143 | } |
2144 | |
2145 | if (vmci_transport_qp_resumed_sub_id != VMCI_INVALID_ID) { |
		vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
2147 | vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID; |
2148 | } |
2149 | |
2150 | vmci_register_vsock_callback(NULL); |
	vsock_core_unregister(&vmci_transport);
2152 | } |
2153 | module_exit(vmci_transport_exit); |
2154 | |
2155 | MODULE_AUTHOR("VMware, Inc." ); |
2156 | MODULE_DESCRIPTION("VMCI transport for Virtual Sockets" ); |
2157 | MODULE_VERSION("1.0.5.0-k" ); |
2158 | MODULE_LICENSE("GPL v2" ); |
2159 | MODULE_ALIAS("vmware_vsock" ); |
2160 | MODULE_ALIAS_NETPROTO(PF_VSOCK); |
2161 | |