/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * AF_XDP user-space access library.
 *
 * Copyright (c) 2018 - 2019 Intel Corporation.
 * Copyright (c) 2019 Facebook
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */

#ifndef __XSK_H
#define __XSK_H

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <linux/if_xdp.h>

#include <bpf/libbpf.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Do not access these members directly. Use the functions below. */
#define DEFINE_XSK_RING(name) \
struct name { \
	__u32 cached_prod; \
	__u32 cached_cons; \
	__u32 mask; \
	__u32 size; \
	__u32 *producer; \
	__u32 *consumer; \
	void *ring; \
	__u32 *flags; \
}

DEFINE_XSK_RING(xsk_ring_prod);
DEFINE_XSK_RING(xsk_ring_cons);

/* For a detailed explanation of the memory barriers associated with the
 * ring, please take a look at net/xdp/xsk_queue.h.
 */
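
/* Example (sketch): ring entries are addressed with "idx & mask", so an
 * index can grow monotonically and wraps implicitly. With size = 8
 * (mask = 7), idx = 9 maps to slot 9 & 7 = 1; this relies on the ring
 * size being a power of two.
 */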

struct xsk_umem;
struct xsk_socket;

static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
					      __u32 idx)
{
	__u64 *addrs = (__u64 *)fill->ring;

	return &addrs[idx & fill->mask];
}

static inline const __u64 *
xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
{
	const __u64 *addrs = (const __u64 *)comp->ring;

	return &addrs[idx & comp->mask];
}

static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
						      __u32 idx)
{
	struct xdp_desc *descs = (struct xdp_desc *)tx->ring;

	return &descs[idx & tx->mask];
}

static inline const struct xdp_desc *
xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
{
	const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;

	return &descs[idx & rx->mask];
}

static inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r)
{
	return *r->flags & XDP_RING_NEED_WAKEUP;
}
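
/* Example (sketch, assuming the socket was bound with the
 * XDP_USE_NEED_WAKEUP flag): after producing descriptors, kick the kernel
 * only when it asks for it. "xsk" and "tx" stand for application-owned
 * objects and are not defined in this header.
 *
 *	if (xsk_ring_prod__needs_wakeup(&tx))
 *		sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
 */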

static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
{
	__u32 free_entries = r->cached_cons - r->cached_prod;

	if (free_entries >= nb)
		return free_entries;

	/* Refresh the local tail pointer.
	 * cached_cons is r->size bigger than the real consumer pointer so
	 * that this addition can be avoided in the more frequently
	 * executed code that computes free_entries at the beginning of
	 * this function. Without this optimization it would have been
	 * free_entries = r->cached_cons - r->cached_prod + r->size.
	 */
	r->cached_cons = __atomic_load_n(r->consumer, __ATOMIC_ACQUIRE);
	r->cached_cons += r->size;

	return r->cached_cons - r->cached_prod;
}
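
/* Worked example of the optimization above (illustrative numbers): with
 * r->size = 4, a real consumer pointer of 10 and cached_prod = 12,
 * cached_cons is kept at 10 + 4 = 14, so the fast path computes
 * free_entries = 14 - 12 = 2, the same result as 10 - 12 + 4 but without
 * the extra addition.
 */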

static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
{
	__u32 entries = r->cached_prod - r->cached_cons;

	if (entries == 0) {
		r->cached_prod = __atomic_load_n(r->producer, __ATOMIC_ACQUIRE);
		entries = r->cached_prod - r->cached_cons;
	}

	return (entries > nb) ? nb : entries;
}

static inline __u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb, __u32 *idx)
{
	if (xsk_prod_nb_free(prod, nb) < nb)
		return 0;

	*idx = prod->cached_prod;
	prod->cached_prod += nb;

	return nb;
}

static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
{
	/* Make sure everything has been written to the ring before indicating
	 * this to the kernel by writing the producer pointer.
	 */
	__atomic_store_n(prod->producer, *prod->producer + nb, __ATOMIC_RELEASE);
}

static inline void xsk_ring_prod__cancel(struct xsk_ring_prod *prod, __u32 nb)
{
	prod->cached_prod -= nb;
}
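
/* Example (sketch): producing a single frame on the TX ring. "tx" is the
 * application's xsk_ring_prod and "frame_addr"/"frame_len" describe a
 * buffer inside the UMEM; error handling is omitted.
 *
 *	__u32 idx;
 *
 *	if (xsk_ring_prod__reserve(&tx, 1, &idx) == 1) {
 *		struct xdp_desc *desc = xsk_ring_prod__tx_desc(&tx, idx);
 *
 *		desc->addr = frame_addr;
 *		desc->len = frame_len;
 *		xsk_ring_prod__submit(&tx, 1);
 *	}
 */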

static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
{
	__u32 entries = xsk_cons_nb_avail(cons, nb);

	if (entries > 0) {
		*idx = cons->cached_cons;
		cons->cached_cons += entries;
	}

	return entries;
}

static inline void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb)
{
	cons->cached_cons -= nb;
}

static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb)
{
	/* Make sure data has been read before indicating we are done
	 * with the entries by updating the consumer pointer.
	 */
	__atomic_store_n(cons->consumer, *cons->consumer + nb, __ATOMIC_RELEASE);
}
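
/* Example (sketch): draining up to 64 descriptors from the RX ring. "rx",
 * "umem_area" and process_packet() are application-side placeholders used
 * only for illustration.
 *
 *	__u32 idx, i, rcvd;
 *
 *	rcvd = xsk_ring_cons__peek(&rx, 64, &idx);
 *	for (i = 0; i < rcvd; i++) {
 *		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&rx, idx + i);
 *
 *		process_packet(xsk_umem__get_data(umem_area, desc->addr), desc->len);
 *	}
 *	if (rcvd)
 *		xsk_ring_cons__release(&rx, rcvd);
 */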

static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
{
	return &((char *)umem_area)[addr];
}

static inline __u64 xsk_umem__extract_addr(__u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline __u64 xsk_umem__extract_offset(__u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
{
	return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr);
}
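
/* Example (sketch): when the UMEM is registered with
 * XDP_UMEM_UNALIGNED_CHUNK_FLAG, descriptor addresses carry an offset in
 * their upper bits and must be decoded before use:
 *
 *	__u64 addr = xsk_umem__add_offset_to_addr(desc->addr);
 *	void *pkt = xsk_umem__get_data(umem_area, addr);
 */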

int xsk_umem__fd(const struct xsk_umem *umem);
int xsk_socket__fd(const struct xsk_socket *xsk);

#define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048
#define XSK_RING_PROD__DEFAULT_NUM_DESCS 2048
#define XSK_UMEM__DEFAULT_FRAME_SHIFT 12 /* 4096 bytes */
#define XSK_UMEM__DEFAULT_FRAME_SIZE (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT)
#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0
#define XSK_UMEM__DEFAULT_FLAGS 0

struct xsk_umem_config {
	__u32 fill_size;
	__u32 comp_size;
	__u32 frame_size;
	__u32 frame_headroom;
	__u32 flags;
	__u32 tx_metadata_len;
};

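/* Example (sketch): a configuration equivalent to the defaults above;
 * tx_metadata_len is left at zero when TX metadata is not used.
 *
 *	const struct xsk_umem_config cfg = {
 *		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
 *		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
 *		.frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
 *		.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
 *		.flags = XSK_UMEM__DEFAULT_FLAGS,
 *	};
 */
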
int xsk_attach_xdp_program(struct bpf_program *prog, int ifindex, __u32 xdp_flags);
void xsk_detach_xdp_program(int ifindex, __u32 xdp_flags);
int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk, __u32 index);
void xsk_clear_xskmap(struct bpf_map *map);
bool xsk_is_in_mode(__u32 ifindex, int mode);
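
/* Example (sketch): loading an XDP program with libbpf and adding an AF_XDP
 * socket to its XSKMAP. "obj", the program name "xsk_prog", the map name
 * "xsks_map" and XDP_FLAGS_DRV_MODE (from <linux/if_link.h>) are assumptions
 * about the application's BPF object, not part of this header.
 *
 *	struct bpf_program *prog = bpf_object__find_program_by_name(obj, "xsk_prog");
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "xsks_map");
 *
 *	if (!xsk_attach_xdp_program(prog, ifindex, XDP_FLAGS_DRV_MODE))
 *		xsk_update_xskmap(map, xsk, queue_id);
 */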

struct xsk_socket_config {
	__u32 rx_size;
	__u32 tx_size;
	__u16 bind_flags;
};
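
/* Example (sketch): ring sizes matching the defaults, requesting the
 * need-wakeup optimization at bind time.
 *
 *	const struct xsk_socket_config cfg = {
 *		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
 *		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
 *		.bind_flags = XDP_USE_NEED_WAKEUP,
 *	};
 */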

/* Set config to NULL to get the default configuration. */
int xsk_umem__create(struct xsk_umem **umem,
		     void *umem_area, __u64 size,
		     struct xsk_ring_prod *fill,
		     struct xsk_ring_cons *comp,
		     const struct xsk_umem_config *config);
int xsk_socket__create(struct xsk_socket **xsk,
		       int ifindex, __u32 queue_id,
		       struct xsk_umem *umem,
		       struct xsk_ring_cons *rx,
		       struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *config);
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
			      int ifindex,
			      __u32 queue_id, struct xsk_umem *umem,
			      struct xsk_ring_cons *rx,
			      struct xsk_ring_prod *tx,
			      struct xsk_ring_prod *fill,
			      struct xsk_ring_cons *comp,
			      const struct xsk_socket_config *config);
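
/* Example (sketch): typical bring-up order. "umem_area" is a placeholder for
 * a page-aligned buffer of "size" bytes owned by the application (typically
 * allocated with mmap()); passing NULL configs selects the defaults.
 *
 *	struct xsk_umem *umem;
 *	struct xsk_socket *xsk;
 *	struct xsk_ring_prod fill, tx;
 *	struct xsk_ring_cons comp, rx;
 *	int err;
 *
 *	err = xsk_umem__create(&umem, umem_area, size, &fill, &comp, NULL);
 *	if (err)
 *		return err;
 *	err = xsk_socket__create(&xsk, ifindex, queue_id, umem, &rx, &tx, NULL);
 *	if (err)
 *		return err;
 */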

/* Returns 0 for success and -EBUSY if the umem is still in use. */
int xsk_umem__delete(struct xsk_umem *umem);
void xsk_socket__delete(struct xsk_socket *xsk);
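
/* Example (sketch): tear-down mirrors creation order; sockets must be deleted
 * before the UMEM, otherwise xsk_umem__delete() returns -EBUSY.
 *
 *	xsk_socket__delete(xsk);
 *	xsk_umem__delete(umem);
 */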

int xsk_set_mtu(int ifindex, int mtu);

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __XSK_H */