// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool/helpers.h>

#include <net/hotdata.h>
#include <net/netdev_lock.h>
#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX	0xFFFE
#define MEM_ID_MIN	1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_free(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

void xdp_unreg_mem_model(struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	int type = mem->type;
	int id = mem->id;

	/* Reset mem info to defaults */
	mem->id = 0;
	mem->type = 0;

	if (id == 0)
		return;

	if (type == MEM_TYPE_PAGE_POOL) {
		xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
	}
}
EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	xdp_unreg_mem_model(&xdp_rxq->mem);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size)
{
	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;
	xdp_rxq->frag_size = frag_size;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(__xdp_rxq_info_reg);

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_alloc_range(&mem_id_pool, mem_id_next, MEM_ID_MAX - 1, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
						     enum xdp_mem_type type,
						     void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (!__is_supported_mem_type(type))
		return ERR_PTR(-EOPNOTSUPP);

	mem->type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			return ERR_PTR(-EINVAL); /* Setup time check page_pool req */
		return NULL;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	mem->id = id;
	xdp_alloc->mem = *mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_free(&mem_id_pool, mem->id);
		mem->id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect, mem);

	mutex_unlock(&mem_id_lock);

	return xdp_alloc;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return ERR_PTR(errno);
}

int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	xdp_alloc = __xdp_reg_mem_model(mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_reg_mem_model);

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);

	if (type == MEM_TYPE_XSK_BUFF_POOL && allocator)
		xsk_pool_set_rxq_info(allocator, xdp_rxq);

	if (trace_mem_connect_enabled() && xdp_alloc)
		trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
}

EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
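
/* A minimal sketch (hypothetical driver code, not part of this file) of how
 * an Rx setup path wires the two registration steps above together; "my_rxq",
 * "my_pp" and the error labels are made-up driver-side names:
 *
 *	err = __xdp_rxq_info_reg(&my_rxq->xdp_rxq, netdev, my_rxq->index,
 *				 napi->napi_id, 0);
 *	if (err)
 *		goto err_free;
 *
 *	err = xdp_rxq_info_reg_mem_model(&my_rxq->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, my_pp);
 *	if (err)
 *		goto err_unreg;
 *
 * Teardown reverses the order: xdp_rxq_info_unreg_mem_model(), or simply
 * xdp_rxq_info_unreg(), which does both, before the queue is destroyed.
 */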

/**
 * xdp_reg_page_pool - register &page_pool as a memory provider for XDP
 * @pool: &page_pool to register
 *
 * Can be used to register pools manually without connecting to any XDP RxQ
 * info, so that the XDP layer will be aware of them. Then, they can be
 * attached to an RxQ info manually via xdp_rxq_info_attach_page_pool().
 *
 * Return: %0 on success, -errno on error.
 */
int xdp_reg_page_pool(struct page_pool *pool)
{
	struct xdp_mem_info mem;

	return xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pool);
}
EXPORT_SYMBOL_GPL(xdp_reg_page_pool);

/**
 * xdp_unreg_page_pool - unregister &page_pool from the memory providers list
 * @pool: &page_pool to unregister
 *
 * A shorthand for manually unregistering page pools. If the pool was
 * previously attached to an RxQ info, it must be detached first.
 */
void xdp_unreg_page_pool(const struct page_pool *pool)
{
	struct xdp_mem_info mem = {
		.type = MEM_TYPE_PAGE_POOL,
		.id = pool->xdp_mem_id,
	};

	xdp_unreg_mem_model(&mem);
}
EXPORT_SYMBOL_GPL(xdp_unreg_page_pool);

/**
 * xdp_rxq_info_attach_page_pool - attach registered pool to RxQ info
 * @xdp_rxq: XDP RxQ info to attach the pool to
 * @pool: pool to attach
 *
 * If the pool was registered manually, this function must be called instead
 * of xdp_rxq_info_reg_mem_model() to connect it to the RxQ info.
 */
void xdp_rxq_info_attach_page_pool(struct xdp_rxq_info *xdp_rxq,
				   const struct page_pool *pool)
{
	struct xdp_mem_info mem = {
		.type = MEM_TYPE_PAGE_POOL,
		.id = pool->xdp_mem_id,
	};

	xdp_rxq_info_attach_mem_model(xdp_rxq, &mem);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_attach_page_pool);
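
/* A minimal sketch (hypothetical driver code, not part of this file) of the
 * manual flow the helpers above enable: register a pool on its own, attach it
 * to an RxQ info later, and tear both down in reverse order. This assumes the
 * matching xdp_rxq_info_detach_mem_model() helper from <net/xdp.h>:
 *
 *	err = xdp_reg_page_pool(pool);
 *	if (err)
 *		return err;
 *
 *	xdp_rxq_info_attach_page_pool(&my_rxq->xdp_rxq, pool);
 *	...
 *	xdp_rxq_info_detach_mem_model(&my_rxq->xdp_rxq);
 *	xdp_unreg_page_pool(pool);
 */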

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites, allowing for faster recycling of
 * xdp_frames/pages in those cases.
 */
void __xdp_return(netmem_ref netmem, enum xdp_mem_type mem_type,
		  bool napi_direct, struct xdp_buff *xdp)
{
	switch (mem_type) {
	case MEM_TYPE_PAGE_POOL:
		netmem = netmem_compound_head(netmem);
		if (napi_direct && xdp_return_frame_no_direct())
			napi_direct = false;
		/* No need to check netmem_is_pp() as mem->type knows this is
		 * a page_pool page
		 */
		page_pool_put_full_netmem(netmem_get_pp(netmem), netmem,
					  napi_direct);
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(__netmem_address(netmem));
		break;
	case MEM_TYPE_PAGE_ORDER0:
		put_page(__netmem_to_page(netmem));
		break;
	case MEM_TYPE_XSK_BUFF_POOL:
		/* NB! Only valid from an xdp_buff! */
		xsk_buff_free(xdp);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem_type);
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (u32 i = 0; i < sinfo->nr_frags; i++)
		__xdp_return(skb_frag_netmem(&sinfo->frags[i]), xdpf->mem_type,
			     false, NULL);

out:
	__xdp_return(virt_to_netmem(xdpf->data), xdpf->mem_type, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (u32 i = 0; i < sinfo->nr_frags; i++)
		__xdp_return(skb_frag_netmem(&sinfo->frags[i]), xdpf->mem_type,
			     true, NULL);

out:
	__xdp_return(virt_to_netmem(xdpf->data), xdpf->mem_type, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

/* XDP bulk APIs introduce a defer/flush mechanism to return
 * pages belonging to the same xdp_mem_allocator object
 * (identified via the mem.id field) in bulk to optimize
 * I-cache and D-cache.
 * The bulk queue size is set to 16 to be aligned to how
 * XDP_REDIRECT bulking works. The bulk is flushed when
 * it is full or when mem.id changes.
 * xdp_frame_bulk is usually stored/allocated on the function
 * call-stack to avoid locking penalties.
 */

/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq)
{
	if (xdpf->mem_type != MEM_TYPE_PAGE_POOL) {
		xdp_return_frame(xdpf);
		return;
	}

	if (bq->count == XDP_BULK_QUEUE_SIZE)
		xdp_flush_frame_bulk(bq);

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		struct skb_shared_info *sinfo;
		int i;

		sinfo = xdp_get_shared_info_from_frame(xdpf);
		for (i = 0; i < sinfo->nr_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];

			bq->q[bq->count++] = skb_frag_netmem(frag);
			if (bq->count == XDP_BULK_QUEUE_SIZE)
				xdp_flush_frame_bulk(bq);
		}
	}
	bq->q[bq->count++] = virt_to_netmem(xdpf->data);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
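
/* A minimal sketch (hypothetical completion-path code, not part of this file)
 * of the defer/flush pattern described above; xdp_frame_bulk_init() and
 * xdp_flush_frame_bulk() come from <net/xdp.h>, while "tx_ring" and "budget"
 * are made-up driver names:
 *
 *	struct xdp_frame_bulk bq;
 *
 *	xdp_frame_bulk_init(&bq);
 *
 *	rcu_read_lock();	// required by xdp_return_frame_bulk()
 *	for (i = 0; i < budget; i++)
 *		xdp_return_frame_bulk(tx_ring->frames[i], &bq);
 *	xdp_flush_frame_bulk(&bq);	// release whatever is still queued
 *	rcu_read_unlock();
 */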

/**
 * xdp_return_frag -- free one XDP frag or decrement its refcount
 * @netmem: network memory reference to release
 * @xdp: &xdp_buff to release the frag for
 */
void xdp_return_frag(netmem_ref netmem, const struct xdp_buff *xdp)
{
	__xdp_return(netmem, xdp->rxq->mem.type, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frag);

void xdp_return_buff(struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	for (u32 i = 0; i < sinfo->nr_frags; i++)
		__xdp_return(skb_frag_netmem(&sinfo->frags[i]),
			     xdp->rxq->mem.type, true, xdp);

out:
	__xdp_return(virt_to_netmem(xdp->data), xdp->rxq->mem.type, true, xdp);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem_type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);

/**
 * xdp_build_skb_from_buff - create an skb from &xdp_buff
 * @xdp: &xdp_buff to convert to an skb
 *
 * Perform common operations to create a new skb to pass up the stack from
 * &xdp_buff: allocate an skb head from the NAPI percpu cache, initialize
 * skb data pointers and offsets, set the recycle bit if the buff is
 * PP-backed, record the Rx queue index, set the protocol and update the
 * frags info.
 *
 * Return: new &sk_buff on success, %NULL on error.
 */
struct sk_buff *xdp_build_skb_from_buff(const struct xdp_buff *xdp)
{
	const struct xdp_rxq_info *rxq = xdp->rxq;
	const struct skb_shared_info *sinfo;
	struct sk_buff *skb;
	u32 nr_frags = 0;
	int metalen;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
	}

	skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);

	metalen = xdp->data - xdp->data_meta;
	if (metalen > 0)
		skb_metadata_set(skb, metalen);

	if (rxq->mem.type == MEM_TYPE_PAGE_POOL)
		skb_mark_for_recycle(skb);

	skb_record_rx_queue(skb, rxq->queue_index);

	if (unlikely(nr_frags)) {
		u32 tsize;

		tsize = sinfo->xdp_frags_truesize ? : nr_frags * xdp->frame_sz;
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size, tsize,
					   xdp_buff_is_frag_pfmemalloc(xdp));
	}

	skb->protocol = eth_type_trans(skb, rxq->dev);

	return skb;
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_buff);
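
/* A minimal sketch (hypothetical NAPI poll code, not part of this file) of
 * where xdp_build_skb_from_buff() fits after an XDP program verdict; "prog",
 * "napi" and "xdp" are driver-side names:
 *
 *	act = bpf_prog_run_xdp(prog, &xdp);
 *	switch (act) {
 *	case XDP_PASS:
 *		skb = xdp_build_skb_from_buff(&xdp);
 *		if (unlikely(!skb)) {
 *			xdp_return_buff(&xdp);
 *			break;
 *		}
 *		napi_gro_receive(napi, skb);
 *		break;
 *	...
 *	}
 */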

/**
 * xdp_copy_frags_from_zc - copy frags from XSk buff to skb
 * @skb: skb to copy frags to
 * @xdp: XSk &xdp_buff from which the frags will be copied
 * @pp: &page_pool backing page allocation, if available
 *
 * Copy all frags from XSk &xdp_buff to the skb to pass it up the stack.
 * Allocate a new buffer for each frag, copy it and attach to the skb.
 *
 * Return: true on success, false on netmem allocation fail.
 */
static noinline bool xdp_copy_frags_from_zc(struct sk_buff *skb,
					    const struct xdp_buff *xdp,
					    struct page_pool *pp)
{
	struct skb_shared_info *sinfo = skb_shinfo(skb);
	const struct skb_shared_info *xinfo;
	u32 nr_frags, tsize = 0;
	bool pfmemalloc = false;

	xinfo = xdp_get_shared_info_from_buff(xdp);
	nr_frags = xinfo->nr_frags;

	for (u32 i = 0; i < nr_frags; i++) {
		const skb_frag_t *frag = &xinfo->frags[i];
		u32 len = skb_frag_size(frag);
		u32 offset, truesize = len;
		struct page *page;

		page = page_pool_dev_alloc(pp, &offset, &truesize);
		if (unlikely(!page)) {
			sinfo->nr_frags = i;
			return false;
		}

		memcpy(page_address(page) + offset, skb_frag_address(frag),
		       LARGEST_ALIGN(len));
		__skb_fill_page_desc_noacc(sinfo, i, page, offset, len);

		tsize += truesize;
		pfmemalloc |= page_is_pfmemalloc(page);
	}

	xdp_update_skb_shared_info(skb, nr_frags, xinfo->xdp_frags_size,
				   tsize, pfmemalloc);

	return true;
}

/**
 * xdp_build_skb_from_zc - create an skb from XSk &xdp_buff
 * @xdp: source XSk buff
 *
 * Similar to xdp_build_skb_from_buff(), but for XSk frames. Allocate an skb
 * head, new buffer for the head, copy the data and initialize the skb fields.
 * If there are frags, allocate new buffers for them and copy.
 * Buffers are allocated from the system percpu pools to try recycling them.
 * If the new skb was built successfully, @xdp is returned to the XSk pool's
 * freelist. On error, it remains untouched and the caller must take care of
 * this.
 *
 * Return: new &sk_buff on success, %NULL on error.
 */
struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp)
{
	const struct xdp_rxq_info *rxq = xdp->rxq;
	u32 len = xdp->data_end - xdp->data_meta;
	u32 truesize = xdp->frame_sz;
	struct sk_buff *skb = NULL;
	struct page_pool *pp;
	int metalen;
	void *data;

	if (!IS_ENABLED(CONFIG_PAGE_POOL))
		return NULL;

	local_lock_nested_bh(&system_page_pool.bh_lock);
	pp = this_cpu_read(system_page_pool.pool);
	data = page_pool_dev_alloc_va(pp, &truesize);
	if (unlikely(!data))
		goto out;

	skb = napi_build_skb(data, truesize);
	if (unlikely(!skb)) {
		page_pool_free_va(pp, data, true);
		goto out;
	}

	skb_mark_for_recycle(skb);
	skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);

	memcpy(__skb_put(skb, len), xdp->data_meta, LARGEST_ALIGN(len));

	metalen = xdp->data - xdp->data_meta;
	if (metalen > 0) {
		skb_metadata_set(skb, metalen);
		__skb_pull(skb, metalen);
	}

	skb_record_rx_queue(skb, rxq->queue_index);

	if (unlikely(xdp_buff_has_frags(xdp)) &&
	    unlikely(!xdp_copy_frags_from_zc(skb, xdp, pp))) {
		napi_consume_skb(skb, true);
		skb = NULL;
		goto out;
	}

	xsk_buff_free(xdp);

	skb->protocol = eth_type_trans(skb, rxq->dev);

out:
	local_unlock_nested_bh(&system_page_pool.bh_lock);
	return skb;
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_zc);

struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	unsigned int headroom, frame_size;
	void *hard_start;
	u8 nr_frags;

	/* xdp frags frame */
	if (unlikely(xdp_frame_has_frags(xdpf)))
		nr_frags = sinfo->nr_frags;

	/* Part of the headroom was reserved for xdpf */
	headroom = sizeof(*xdpf) + xdpf->headroom;

	/* The memory backing xdp_frame data already has reserved
	 * room for build_skb to place skb_shared_info in tailroom.
	 */
	frame_size = xdpf->frame_sz;

	hard_start = xdpf->data - headroom;
	skb = build_skb_around(skb, hard_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	if (unlikely(xdp_frame_has_frags(xdpf)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   nr_frags * xdpf->frame_sz,
					   xdp_frame_is_frag_pfmemalloc(xdpf));

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, dev);

	/* Optional SKB info, currently missing:
	 * - HW checksum info (skb->ip_summed)
	 * - HW RX hash (skb_set_hash)
	 * - RX ring dev queue index (skb_record_rx_queue)
	 */

	if (xdpf->mem_type == MEM_TYPE_PAGE_POOL)
		skb_mark_for_recycle(skb);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}
EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);

struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __xdp_build_skb_from_frame(xdpf, skb, dev);
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);

struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
{
	unsigned int headroom, totalsize;
	struct xdp_frame *nxdpf;
	struct page *page;
	void *addr;

	headroom = xdpf->headroom + sizeof(*xdpf);
	totalsize = headroom + xdpf->len;

	if (unlikely(totalsize > PAGE_SIZE))
		return NULL;
	page = dev_alloc_page();
	if (!page)
		return NULL;
	addr = page_to_virt(page);

	memcpy(addr, xdpf, totalsize);

	nxdpf = addr;
	nxdpf->data = addr + headroom;
	nxdpf->frame_sz = PAGE_SIZE;
	nxdpf->mem_type = MEM_TYPE_PAGE_ORDER0;

	return nxdpf;
}

__bpf_kfunc_start_defs();

/**
 * bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp.
 * @ctx: XDP context pointer.
 * @timestamp: Return value pointer.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : means device driver does not implement kfunc
 * * ``-ENODATA``    : means no RX-timestamp available for this frame
 */
__bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	return -EOPNOTSUPP;
}
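
/* A minimal sketch (not part of this file) of how an XDP program consumes the
 * metadata kfuncs defined here: declare the kfunc with __ksym and check the
 * return code. This assumes the usual libbpf/vmlinux.h build setup on the BPF
 * side; "rx_prog" is a made-up program name:
 *
 *	extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
 *						 __u64 *timestamp) __ksym;
 *
 *	SEC("xdp")
 *	int rx_prog(struct xdp_md *ctx)
 *	{
 *		__u64 ts;
 *
 *		if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
 *			;	// ts now holds the HW RX timestamp
 *		return XDP_PASS;
 *	}
 */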

/**
 * bpf_xdp_metadata_rx_hash - Read XDP frame RX hash.
 * @ctx: XDP context pointer.
 * @hash: Return value pointer.
 * @rss_type: Return value pointer for RSS type.
 *
 * The RSS hash type (@rss_type) specifies what portion of packet headers NIC
 * hardware used when calculating RSS hash value. The RSS type can be decoded
 * via &enum xdp_rss_hash_type either matching on individual L3/L4 bits
 * ``XDP_RSS_L*`` or by combined traditional *RSS Hashing Types*
 * ``XDP_RSS_TYPE_L*``.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : means device driver doesn't implement kfunc
 * * ``-ENODATA``    : means no RX-hash available for this frame
 */
__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
					 enum xdp_rss_hash_type *rss_type)
{
	return -EOPNOTSUPP;
}

/**
 * bpf_xdp_metadata_rx_vlan_tag - Get XDP packet outermost VLAN tag
 * @ctx: XDP context pointer.
 * @vlan_proto: Destination pointer for VLAN Tag protocol identifier (TPID).
 * @vlan_tci: Destination pointer for VLAN TCI (VID + DEI + PCP)
 *
 * In case of success, ``vlan_proto`` contains *Tag protocol identifier (TPID)*,
 * usually ``ETH_P_8021Q`` or ``ETH_P_8021AD``, but some networks can use
 * custom TPIDs. ``vlan_proto`` is stored in **network byte order (BE)**
 * and should be used as follows:
 * ``if (vlan_proto == bpf_htons(ETH_P_8021Q)) do_something();``
 *
 * ``vlan_tci`` contains the remaining 16 bits of a VLAN tag.
 * Driver is expected to provide those in **host byte order (usually LE)**,
 * so the bpf program should not perform byte conversion.
 * According to 802.1Q standard, *VLAN TCI (Tag control information)*
 * is a bit field that contains:
 * *VLAN identifier (VID)* that can be read with ``vlan_tci & 0xfff``,
 * *Drop eligible indicator (DEI)* - 1 bit,
 * *Priority code point (PCP)* - 3 bits.
 * For detailed meaning of DEI and PCP, please refer to other sources.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : device driver doesn't implement kfunc
 * * ``-ENODATA``    : VLAN tag was not stripped or is not available
 */
__bpf_kfunc int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
					     __be16 *vlan_proto, u16 *vlan_tci)
{
	return -EOPNOTSUPP;
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(xdp_metadata_kfunc_ids)
#define XDP_METADATA_KFUNC(_, __, name, ___) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
BTF_KFUNCS_END(xdp_metadata_kfunc_ids)

static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &xdp_metadata_kfunc_ids,
};

BTF_ID_LIST(xdp_metadata_kfunc_ids_unsorted)
#define XDP_METADATA_KFUNC(name, _, str, __) BTF_ID(func, str)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

u32 bpf_xdp_metadata_kfunc_id(int id)
{
	/* xdp_metadata_kfunc_ids is sorted and can't be used */
	return xdp_metadata_kfunc_ids_unsorted[id];
}

bool bpf_dev_bound_kfunc_id(u32 btf_id)
{
	return btf_id_set8_contains(&xdp_metadata_kfunc_ids, btf_id);
}

static int __init xdp_metadata_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &xdp_metadata_kfunc_set);
}
late_initcall(xdp_metadata_init);

void xdp_set_features_flag_locked(struct net_device *dev, xdp_features_t val)
{
	val &= NETDEV_XDP_ACT_MASK;
	if (dev->xdp_features == val)
		return;

	netdev_assert_locked_or_invisible(dev);
	dev->xdp_features = val;

	if (dev->reg_state == NETREG_REGISTERED)
		call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL_GPL(xdp_set_features_flag_locked);

void xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
{
	netdev_lock(dev);
	xdp_set_features_flag_locked(dev, val);
	netdev_unlock(dev);
}
EXPORT_SYMBOL_GPL(xdp_set_features_flag);
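
/* A minimal sketch (hypothetical probe-time code, not part of this file) of a
 * driver advertising its XDP capabilities with xdp_set_features_flag(); the
 * exact NETDEV_XDP_ACT_* set depends on what the driver actually implements:
 *
 *	xdp_set_features_flag(netdev, NETDEV_XDP_ACT_BASIC |
 *				      NETDEV_XDP_ACT_REDIRECT);
 *
 * A driver that later gains (or loses) an ndo_xdp_xmit() path can call
 * xdp_features_set_redirect_target() / xdp_features_clear_redirect_target()
 * below instead of recomputing the full mask.
 */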

void xdp_features_set_redirect_target_locked(struct net_device *dev,
					     bool support_sg)
{
	xdp_features_t val = (dev->xdp_features | NETDEV_XDP_ACT_NDO_XMIT);

	if (support_sg)
		val |= NETDEV_XDP_ACT_NDO_XMIT_SG;
	xdp_set_features_flag_locked(dev, val);
}
EXPORT_SYMBOL_GPL(xdp_features_set_redirect_target_locked);

void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
{
	netdev_lock(dev);
	xdp_features_set_redirect_target_locked(dev, support_sg);
	netdev_unlock(dev);
}
EXPORT_SYMBOL_GPL(xdp_features_set_redirect_target);

void xdp_features_clear_redirect_target_locked(struct net_device *dev)
{
	xdp_features_t val = dev->xdp_features;

	val &= ~(NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_NDO_XMIT_SG);
	xdp_set_features_flag_locked(dev, val);
}
EXPORT_SYMBOL_GPL(xdp_features_clear_redirect_target_locked);

void xdp_features_clear_redirect_target(struct net_device *dev)
{
	netdev_lock(dev);
	xdp_features_clear_redirect_target_locked(dev);
	netdev_unlock(dev);
}
EXPORT_SYMBOL_GPL(xdp_features_clear_redirect_target);
1057 |