/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_VSOCK_H
#define _LINUX_VIRTIO_VSOCK_H

#include <uapi/linux/virtio_vsock.h>
#include <linux/socket.h>
#include <net/sock.h>
#include <net/af_vsock.h>

#define VIRTIO_VSOCK_SKB_HEADROOM (sizeof(struct virtio_vsock_hdr))

struct virtio_vsock_skb_cb {
        bool reply;
        bool tap_delivered;
        u32 offset;
};

#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))

static inline struct virtio_vsock_hdr *virtio_vsock_hdr(struct sk_buff *skb)
{
        return (struct virtio_vsock_hdr *)skb->head;
}
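
/*
 * The packet header lives in the headroom reserved by
 * virtio_vsock_alloc_skb(), which is why virtio_vsock_hdr() simply points
 * at skb->head.  Header fields are little-endian on the wire, so readers
 * convert them; an illustrative sketch only:
 *
 *        struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
 *        u16 op = le16_to_cpu(hdr->op);
 *        u32 payload_len = le32_to_cpu(hdr->len);
 */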

static inline bool virtio_vsock_skb_reply(struct sk_buff *skb)
{
        return VIRTIO_VSOCK_SKB_CB(skb)->reply;
}

static inline void virtio_vsock_skb_set_reply(struct sk_buff *skb)
{
        VIRTIO_VSOCK_SKB_CB(skb)->reply = true;
}

static inline bool virtio_vsock_skb_tap_delivered(struct sk_buff *skb)
{
        return VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered;
}

static inline void virtio_vsock_skb_set_tap_delivered(struct sk_buff *skb)
{
        VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = true;
}

static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
{
        VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false;
}

static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
{
        u32 len;

        len = le32_to_cpu(virtio_vsock_hdr(skb)->len);

        if (len > 0)
                skb_put(skb, len);
}

static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
{
        struct sk_buff *skb;

        if (size < VIRTIO_VSOCK_SKB_HEADROOM)
                return NULL;

        skb = alloc_skb(size, mask);
        if (!skb)
                return NULL;

        skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
        return skb;
}
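
/*
 * A minimal sketch of how an rx buffer might be set up with the helpers
 * above (the size and GFP flags here are only examples): allocate room
 * for the header plus payload, let the device fill the buffer, then call
 * virtio_vsock_skb_rx_put() to expose hdr->len bytes of payload.
 *
 *        struct sk_buff *skb;
 *
 *        skb = virtio_vsock_alloc_skb(VIRTIO_VSOCK_SKB_HEADROOM +
 *                                     VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE,
 *                                     GFP_KERNEL);
 *        if (!skb)
 *                return -ENOMEM;
 *        (... device writes header and payload into the buffer ...)
 *        virtio_vsock_skb_rx_put(skb);
 */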

static inline void
virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
{
        spin_lock_bh(&list->lock);
        __skb_queue_head(list, skb);
        spin_unlock_bh(&list->lock);
}

static inline void
virtio_vsock_skb_queue_tail(struct sk_buff_head *list, struct sk_buff *skb)
{
        spin_lock_bh(&list->lock);
        __skb_queue_tail(list, skb);
        spin_unlock_bh(&list->lock);
}

static inline struct sk_buff *virtio_vsock_skb_dequeue(struct sk_buff_head *list)
{
        struct sk_buff *skb;

        spin_lock_bh(&list->lock);
        skb = __skb_dequeue(list);
        spin_unlock_bh(&list->lock);

        return skb;
}

static inline void virtio_vsock_skb_queue_purge(struct sk_buff_head *list)
{
        spin_lock_bh(&list->lock);
        __skb_queue_purge(list);
        spin_unlock_bh(&list->lock);
}
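
/*
 * These queue helpers only wrap the unlocked __skb_queue_*() primitives
 * with the queue's own lock, taken with bottom halves disabled, so a
 * producer and a consumer can share one sk_buff_head.  A hypothetical
 * usage sketch, assuming pkt_queue was initialized with
 * skb_queue_head_init() and skb is a packet to hand over:
 *
 *        virtio_vsock_skb_queue_tail(&pkt_queue, skb);
 *
 *        while ((skb = virtio_vsock_skb_dequeue(&pkt_queue)))
 *                consume_skb(skb);
 */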

static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
{
        return (size_t)(skb_end_pointer(skb) - skb->head);
}

#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE        (1024 * 4)
#define VIRTIO_VSOCK_MAX_BUF_SIZE               0xFFFFFFFFUL
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE           (1024 * 64)

enum {
        VSOCK_VQ_RX     = 0, /* for host to guest data */
        VSOCK_VQ_TX     = 1, /* for guest to host data */
        VSOCK_VQ_EVENT  = 2,
        VSOCK_VQ_MAX    = 3,
};

/* Per-socket state (accessed via vsk->trans) */
struct virtio_vsock_sock {
        struct vsock_sock *vsk;

        spinlock_t tx_lock;
        spinlock_t rx_lock;

        /* Protected by tx_lock */
        u32 tx_cnt;
        u32 peer_fwd_cnt;
        u32 peer_buf_alloc;

        /* Protected by rx_lock */
        u32 fwd_cnt;
        u32 last_fwd_cnt;
        u32 rx_bytes;
        u32 buf_alloc;
        struct sk_buff_head rx_queue;
        u32 msg_count;
};
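
/*
 * The tx fields above implement the virtio-vsock credit scheme: the peer
 * advertises its receive buffer size (peer_buf_alloc) and how many bytes
 * it has consumed so far (peer_fwd_cnt), while tx_cnt counts the bytes
 * sent from this side.  The space still free at the peer is then roughly:
 *
 *        free_space = peer_buf_alloc - (tx_cnt - peer_fwd_cnt);
 *
 * which is what virtio_transport_get_credit() hands out and
 * virtio_transport_put_credit() gives back when less than the reserved
 * amount was actually sent.
 */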

struct virtio_vsock_pkt_info {
        u32 remote_cid, remote_port;
        struct vsock_sock *vsk;
        struct msghdr *msg;
        u32 pkt_len;
        u16 type;
        u16 op;
        u32 flags;
        bool reply;
};
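
/*
 * A descriptor like this is filled in by callers and consumed by the
 * internal send path in net/vmw_vsock/virtio_transport_common.c.  As a
 * purely illustrative example, a connection request could be described
 * with:
 *
 *        struct virtio_vsock_pkt_info info = {
 *                .op = VIRTIO_VSOCK_OP_REQUEST,
 *                .vsk = vsk,
 *        };
 */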

struct virtio_transport {
        /* This must be the first field */
        struct vsock_transport transport;

        /* Takes ownership of the packet */
        int (*send_pkt)(struct sk_buff *skb);

        /* Used in MSG_ZEROCOPY mode. Checks whether the provided data
         * (number of buffers) can be transmitted in zerocopy mode.
         * If this callback is not implemented for the current
         * transport, the transport doesn't need extra checks and can
         * perform zerocopy transmission by default.
         */
        bool (*can_msgzerocopy)(int bufs_num);
};
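
/*
 * Keeping the embedded vsock_transport first means it shares its address
 * with the containing virtio_transport, so the struct vsock_transport
 * pointer that the af_vsock core hands around can be converted back, for
 * instance via container_of().  A hypothetical helper doing so:
 *
 *        static struct virtio_transport *
 *        to_virtio_transport(struct vsock_transport *t)
 *        {
 *                return container_of(t, struct virtio_transport, transport);
 *        }
 */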

ssize_t
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
                                struct msghdr *msg,
                                size_t len,
                                int type);
int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
                               struct msghdr *msg,
                               size_t len, int flags);

int
virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
                                   struct msghdr *msg,
                                   size_t len);
ssize_t
virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
                                   struct msghdr *msg,
                                   int flags);
s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk);

int virtio_transport_do_socket_init(struct vsock_sock *vsk,
                                    struct vsock_sock *psk);
int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
                                size_t target,
                                bool *data_ready_now);
int
virtio_transport_notify_poll_out(struct vsock_sock *vsk,
                                 size_t target,
                                 bool *space_available_now);

int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
        size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
        size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
        size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
        size_t target, ssize_t copied, bool data_read,
        struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_send_init(struct vsock_sock *vsk,
        struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
        struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
        struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
        ssize_t written, struct vsock_transport_send_notify_data *data);
void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val);

u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk);
bool virtio_transport_stream_is_active(struct vsock_sock *vsk);
bool virtio_transport_stream_allow(u32 cid, u32 port);
int virtio_transport_dgram_bind(struct vsock_sock *vsk,
                                struct sockaddr_vm *addr);
bool virtio_transport_dgram_allow(u32 cid, u32 port);

int virtio_transport_connect(struct vsock_sock *vsk);

int virtio_transport_shutdown(struct vsock_sock *vsk, int mode);

void virtio_transport_release(struct vsock_sock *vsk);

ssize_t
virtio_transport_stream_enqueue(struct vsock_sock *vsk,
                                struct msghdr *msg,
                                size_t len);
int
virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
                               struct sockaddr_vm *remote_addr,
                               struct msghdr *msg,
                               size_t len);

void virtio_transport_destruct(struct vsock_sock *vsk);

void virtio_transport_recv_pkt(struct virtio_transport *t,
                               struct sk_buff *skb);
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t read_actor);
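
/*
 * Rough sketch of how the credit helpers pair up on the tx path (the
 * real logic lives in net/vmw_vsock/virtio_transport_common.c; "sent"
 * and transmit_bytes() below are hypothetical placeholders for the
 * actual transmission step):
 *
 *        u32 credit, sent;
 *
 *        credit = virtio_transport_get_credit(vvs, pkt_len);
 *        if (!credit)
 *                return -EAGAIN;
 *        sent = transmit_bytes(credit);
 *        if (sent < credit)
 *                virtio_transport_put_credit(vvs, credit - sent);
 */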
#endif /* _LINUX_VIRTIO_VSOCK_H */
