1 | // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) |
2 | |
3 | /* |
4 | * common eBPF ELF operations. |
5 | * |
6 | * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org> |
7 | * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> |
8 | * Copyright (C) 2015 Huawei Inc. |
9 | * |
10 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU Lesser General Public |
12 | * License as published by the Free Software Foundation; |
13 | * version 2.1 of the License (not later!) |
14 | * |
15 | * This program is distributed in the hope that it will be useful, |
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
18 | * GNU Lesser General Public License for more details. |
19 | * |
20 | * You should have received a copy of the GNU Lesser General Public |
21 | * License along with this program; if not, see <http://www.gnu.org/licenses> |
22 | */ |
23 | |
24 | #include <stdlib.h> |
25 | #include <string.h> |
26 | #include <memory.h> |
27 | #include <unistd.h> |
28 | #include <asm/unistd.h> |
29 | #include <errno.h> |
30 | #include <linux/bpf.h> |
31 | #include <linux/filter.h> |
32 | #include <linux/kernel.h> |
33 | #include <limits.h> |
34 | #include <sys/resource.h> |
35 | #include "bpf.h" |
36 | #include "libbpf.h" |
37 | #include "libbpf_internal.h" |
38 | |
39 | /* |
40 | * When building perf, unistd.h is overridden. __NR_bpf is |
41 | * required to be defined explicitly. |
42 | */ |
43 | #ifndef __NR_bpf |
44 | # if defined(__i386__) |
45 | # define __NR_bpf 357 |
46 | # elif defined(__x86_64__) |
47 | # define __NR_bpf 321 |
48 | # elif defined(__aarch64__) |
49 | # define __NR_bpf 280 |
50 | # elif defined(__sparc__) |
51 | # define __NR_bpf 349 |
52 | # elif defined(__s390__) |
53 | # define __NR_bpf 351 |
54 | # elif defined(__arc__) |
55 | # define __NR_bpf 280 |
56 | # elif defined(__mips__) && defined(_ABIO32) |
57 | # define __NR_bpf 4355 |
58 | # elif defined(__mips__) && defined(_ABIN32) |
59 | # define __NR_bpf 6319 |
60 | # elif defined(__mips__) && defined(_ABI64) |
61 | # define __NR_bpf 5315 |
62 | # else |
63 | # error __NR_bpf not defined. libbpf does not support your arch. |
64 | # endif |
65 | #endif |
66 | |
67 | static inline __u64 ptr_to_u64(const void *ptr) |
68 | { |
69 | return (__u64) (unsigned long) ptr; |
70 | } |
71 | |
72 | static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, |
73 | unsigned int size) |
74 | { |
75 | return syscall(__NR_bpf, cmd, attr, size); |
76 | } |
77 | |
78 | static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr, |
79 | unsigned int size) |
80 | { |
81 | int fd; |
82 | |
83 | fd = sys_bpf(cmd, attr, size); |
84 | return ensure_good_fd(fd); |
85 | } |
86 | |
87 | int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts) |
88 | { |
89 | int fd; |
90 | |
91 | do { |
92 | fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size); |
93 | } while (fd < 0 && errno == EAGAIN && --attempts > 0); |
94 | |
95 | return fd; |
96 | } |
97 | |
98 | /* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to |
99 | * memcg-based memory accounting for BPF maps and progs. This was done in [0]. |
100 | * We use the support for bpf_ktime_get_coarse_ns() helper, which was added in |
101 | * the same 5.11 Linux release ([1]), to detect memcg-based accounting for BPF. |
102 | * |
103 | * [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/ |
104 | * [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper") |
105 | */ |
106 | int probe_memcg_account(int token_fd) |
107 | { |
	const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
109 | struct bpf_insn insns[] = { |
110 | BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns), |
111 | BPF_EXIT_INSN(), |
112 | }; |
113 | size_t insn_cnt = ARRAY_SIZE(insns); |
114 | union bpf_attr attr; |
115 | int prog_fd; |
116 | |
	/* attempt loading a trivial SOCKET_FILTER prog that uses the
	 * bpf_ktime_get_coarse_ns() helper
	 */
118 | memset(&attr, 0, attr_sz); |
119 | attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; |
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = insn_cnt;
	attr.license = ptr_to_u64("GPL");
123 | attr.prog_token_fd = token_fd; |
124 | if (token_fd) |
125 | attr.prog_flags |= BPF_F_TOKEN_FD; |
126 | |
	prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz);
128 | if (prog_fd >= 0) { |
129 | close(prog_fd); |
130 | return 1; |
131 | } |
132 | return 0; |
133 | } |
134 | |
135 | static bool memlock_bumped; |
136 | static rlim_t memlock_rlim = RLIM_INFINITY; |
137 | |
138 | int libbpf_set_memlock_rlim(size_t memlock_bytes) |
139 | { |
140 | if (memlock_bumped) |
		return libbpf_err(-EBUSY);
142 | |
143 | memlock_rlim = memlock_bytes; |
144 | return 0; |
145 | } |
146 | |
147 | int bump_rlimit_memlock(void) |
148 | { |
149 | struct rlimit rlim; |
150 | |
151 | /* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */ |
	if (memlock_bumped || feat_supported(NULL, FEAT_MEMCG_ACCOUNT))
153 | return 0; |
154 | |
155 | memlock_bumped = true; |
156 | |
	/* zero memlock_rlim disables auto-bumping RLIMIT_MEMLOCK */
158 | if (memlock_rlim == 0) |
159 | return 0; |
160 | |
161 | rlim.rlim_cur = rlim.rlim_max = memlock_rlim; |
162 | if (setrlimit(RLIMIT_MEMLOCK, &rlim)) |
163 | return -errno; |
164 | |
165 | return 0; |
166 | } |
167 | |
168 | int bpf_map_create(enum bpf_map_type map_type, |
169 | const char *map_name, |
170 | __u32 key_size, |
171 | __u32 value_size, |
172 | __u32 max_entries, |
173 | const struct bpf_map_create_opts *opts) |
174 | { |
175 | const size_t attr_sz = offsetofend(union bpf_attr, map_token_fd); |
176 | union bpf_attr attr; |
177 | int fd; |
178 | |
179 | bump_rlimit_memlock(); |
180 | |
181 | memset(&attr, 0, attr_sz); |
182 | |
183 | if (!OPTS_VALID(opts, bpf_map_create_opts)) |
		return libbpf_err(-EINVAL);

	attr.map_type = map_type;
	if (map_name && feat_supported(NULL, FEAT_PROG_NAME))
		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
189 | attr.key_size = key_size; |
190 | attr.value_size = value_size; |
191 | attr.max_entries = max_entries; |
192 | |
193 | attr.btf_fd = OPTS_GET(opts, btf_fd, 0); |
194 | attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0); |
195 | attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0); |
196 | attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0); |
197 | attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, 0); |
198 | |
199 | attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0); |
200 | attr.map_flags = OPTS_GET(opts, map_flags, 0); |
201 | attr.map_extra = OPTS_GET(opts, map_extra, 0); |
202 | attr.numa_node = OPTS_GET(opts, numa_node, 0); |
203 | attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0); |
204 | |
205 | attr.map_token_fd = OPTS_GET(opts, token_fd, 0); |
206 | |
	fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
209 | } |
210 | |
211 | static void * |
212 | alloc_zero_tailing_info(const void *orecord, __u32 cnt, |
213 | __u32 actual_rec_size, __u32 expected_rec_size) |
214 | { |
215 | __u64 info_len = (__u64)actual_rec_size * cnt; |
216 | void *info, *nrecord; |
217 | int i; |
218 | |
219 | info = malloc(info_len); |
220 | if (!info) |
221 | return NULL; |
222 | |
223 | /* zero out bytes kernel does not understand */ |
224 | nrecord = info; |
225 | for (i = 0; i < cnt; i++) { |
226 | memcpy(nrecord, orecord, expected_rec_size); |
227 | memset(nrecord + expected_rec_size, 0, |
228 | actual_rec_size - expected_rec_size); |
229 | orecord += actual_rec_size; |
230 | nrecord += actual_rec_size; |
231 | } |
232 | |
233 | return info; |
234 | } |
235 | |
236 | int bpf_prog_load(enum bpf_prog_type prog_type, |
237 | const char *prog_name, const char *license, |
238 | const struct bpf_insn *insns, size_t insn_cnt, |
239 | struct bpf_prog_load_opts *opts) |
240 | { |
241 | const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd); |
242 | void *finfo = NULL, *linfo = NULL; |
243 | const char *func_info, *line_info; |
244 | __u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd; |
245 | __u32 func_info_rec_size, line_info_rec_size; |
246 | int fd, attempts; |
247 | union bpf_attr attr; |
248 | char *log_buf; |
249 | |
250 | bump_rlimit_memlock(); |
251 | |
252 | if (!OPTS_VALID(opts, bpf_prog_load_opts)) |
		return libbpf_err(-EINVAL);
254 | |
255 | attempts = OPTS_GET(opts, attempts, 0); |
256 | if (attempts < 0) |
		return libbpf_err(-EINVAL);
258 | if (attempts == 0) |
259 | attempts = PROG_LOAD_ATTEMPTS; |
260 | |
261 | memset(&attr, 0, attr_sz); |
262 | |
263 | attr.prog_type = prog_type; |
264 | attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0); |
265 | |
266 | attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0); |
267 | attr.prog_flags = OPTS_GET(opts, prog_flags, 0); |
268 | attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0); |
269 | attr.kern_version = OPTS_GET(opts, kern_version, 0); |
270 | attr.prog_token_fd = OPTS_GET(opts, token_fd, 0); |
271 | |
	if (prog_name && feat_supported(NULL, FEAT_PROG_NAME))
		libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	attr.license = ptr_to_u64(license);

	if (insn_cnt > UINT_MAX)
		return libbpf_err(-E2BIG);

	attr.insns = ptr_to_u64(insns);
280 | attr.insn_cnt = (__u32)insn_cnt; |
281 | |
282 | attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0); |
283 | attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0); |
284 | |
285 | if (attach_prog_fd && attach_btf_obj_fd) |
		return libbpf_err(-EINVAL);
287 | |
288 | attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0); |
289 | if (attach_prog_fd) |
290 | attr.attach_prog_fd = attach_prog_fd; |
291 | else |
292 | attr.attach_btf_obj_fd = attach_btf_obj_fd; |
293 | |
294 | log_buf = OPTS_GET(opts, log_buf, NULL); |
295 | log_size = OPTS_GET(opts, log_size, 0); |
296 | log_level = OPTS_GET(opts, log_level, 0); |
297 | |
298 | if (!!log_buf != !!log_size) |
		return libbpf_err(-EINVAL);

	func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
	func_info = OPTS_GET(opts, func_info, NULL);
	attr.func_info_rec_size = func_info_rec_size;
	attr.func_info = ptr_to_u64(func_info);
	attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0);

	line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
	line_info = OPTS_GET(opts, line_info, NULL);
	attr.line_info_rec_size = line_info_rec_size;
	attr.line_info = ptr_to_u64(line_info);
311 | attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0); |
312 | |
313 | attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL)); |
314 | |
315 | if (log_level) { |
		attr.log_buf = ptr_to_u64(log_buf);
317 | attr.log_size = log_size; |
318 | attr.log_level = log_level; |
319 | } |
320 | |
	fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
322 | OPTS_SET(opts, log_true_size, attr.log_true_size); |
323 | if (fd >= 0) |
324 | return fd; |
325 | |
326 | /* After bpf_prog_load, the kernel may modify certain attributes |
327 | * to give user space a hint how to deal with loading failure. |
328 | * Check to see whether we can make some changes and load again. |
329 | */ |
330 | while (errno == E2BIG && (!finfo || !linfo)) { |
331 | if (!finfo && attr.func_info_cnt && |
332 | attr.func_info_rec_size < func_info_rec_size) { |
333 | /* try with corrected func info records */ |
			finfo = alloc_zero_tailing_info(func_info,
							attr.func_info_cnt,
							func_info_rec_size,
							attr.func_info_rec_size);
			if (!finfo) {
				errno = E2BIG;
				goto done;
			}

			attr.func_info = ptr_to_u64(finfo);
344 | attr.func_info_rec_size = func_info_rec_size; |
345 | } else if (!linfo && attr.line_info_cnt && |
346 | attr.line_info_rec_size < line_info_rec_size) { |
			linfo = alloc_zero_tailing_info(line_info,
							attr.line_info_cnt,
							line_info_rec_size,
							attr.line_info_rec_size);
			if (!linfo) {
				errno = E2BIG;
				goto done;
			}

			attr.line_info = ptr_to_u64(linfo);
357 | attr.line_info_rec_size = line_info_rec_size; |
358 | } else { |
359 | break; |
360 | } |
361 | |
		fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
363 | OPTS_SET(opts, log_true_size, attr.log_true_size); |
364 | if (fd >= 0) |
365 | goto done; |
366 | } |
367 | |
368 | if (log_level == 0 && log_buf) { |
369 | /* log_level == 0 with non-NULL log_buf requires retrying on error |
370 | * with log_level == 1 and log_buf/log_buf_size set, to get details of |
371 | * failure |
372 | */ |
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = 1;

		fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
378 | OPTS_SET(opts, log_true_size, attr.log_true_size); |
379 | } |
380 | done: |
381 | /* free() doesn't affect errno, so we don't need to restore it */ |
382 | free(finfo); |
383 | free(linfo); |
	return libbpf_err_errno(fd);
385 | } |
386 | |
387 | int bpf_map_update_elem(int fd, const void *key, const void *value, |
388 | __u64 flags) |
389 | { |
390 | const size_t attr_sz = offsetofend(union bpf_attr, flags); |
391 | union bpf_attr attr; |
392 | int ret; |
393 | |
394 | memset(&attr, 0, attr_sz); |
395 | attr.map_fd = fd; |
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
401 | return libbpf_err_errno(ret); |
402 | } |
403 | |
404 | int bpf_map_lookup_elem(int fd, const void *key, void *value) |
405 | { |
406 | const size_t attr_sz = offsetofend(union bpf_attr, flags); |
407 | union bpf_attr attr; |
408 | int ret; |
409 | |
410 | memset(&attr, 0, attr_sz); |
411 | attr.map_fd = fd; |
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
416 | return libbpf_err_errno(ret); |
417 | } |
418 | |
419 | int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags) |
420 | { |
421 | const size_t attr_sz = offsetofend(union bpf_attr, flags); |
422 | union bpf_attr attr; |
423 | int ret; |
424 | |
425 | memset(&attr, 0, attr_sz); |
426 | attr.map_fd = fd; |
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
432 | return libbpf_err_errno(ret); |
433 | } |
434 | |
435 | int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value) |
436 | { |
437 | const size_t attr_sz = offsetofend(union bpf_attr, flags); |
438 | union bpf_attr attr; |
439 | int ret; |
440 | |
441 | memset(&attr, 0, attr_sz); |
442 | attr.map_fd = fd; |
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
447 | return libbpf_err_errno(ret); |
448 | } |
449 | |
450 | int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags) |
451 | { |
452 | const size_t attr_sz = offsetofend(union bpf_attr, flags); |
453 | union bpf_attr attr; |
454 | int ret; |
455 | |
456 | memset(&attr, 0, attr_sz); |
457 | attr.map_fd = fd; |
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
463 | return libbpf_err_errno(ret); |
464 | } |
465 | |
466 | int bpf_map_delete_elem(int fd, const void *key) |
467 | { |
468 | const size_t attr_sz = offsetofend(union bpf_attr, flags); |
469 | union bpf_attr attr; |
470 | int ret; |
471 | |
472 | memset(&attr, 0, attr_sz); |
473 | attr.map_fd = fd; |
	attr.key = ptr_to_u64(key);

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
477 | return libbpf_err_errno(ret); |
478 | } |
479 | |
480 | int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags) |
481 | { |
482 | const size_t attr_sz = offsetofend(union bpf_attr, flags); |
483 | union bpf_attr attr; |
484 | int ret; |
485 | |
486 | memset(&attr, 0, attr_sz); |
487 | attr.map_fd = fd; |
	attr.key = ptr_to_u64(key);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
492 | return libbpf_err_errno(ret); |
493 | } |
494 | |
495 | int bpf_map_get_next_key(int fd, const void *key, void *next_key) |
496 | { |
497 | const size_t attr_sz = offsetofend(union bpf_attr, next_key); |
498 | union bpf_attr attr; |
499 | int ret; |
500 | |
501 | memset(&attr, 0, attr_sz); |
502 | attr.map_fd = fd; |
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, attr_sz);
507 | return libbpf_err_errno(ret); |
508 | } |
509 | |
510 | int bpf_map_freeze(int fd) |
511 | { |
512 | const size_t attr_sz = offsetofend(union bpf_attr, map_fd); |
513 | union bpf_attr attr; |
514 | int ret; |
515 | |
516 | memset(&attr, 0, attr_sz); |
517 | attr.map_fd = fd; |
518 | |
	ret = sys_bpf(BPF_MAP_FREEZE, &attr, attr_sz);
520 | return libbpf_err_errno(ret); |
521 | } |
522 | |
523 | static int bpf_map_batch_common(int cmd, int fd, void *in_batch, |
524 | void *out_batch, void *keys, void *values, |
525 | __u32 *count, |
526 | const struct bpf_map_batch_opts *opts) |
527 | { |
528 | const size_t attr_sz = offsetofend(union bpf_attr, batch); |
529 | union bpf_attr attr; |
530 | int ret; |
531 | |
532 | if (!OPTS_VALID(opts, bpf_map_batch_opts)) |
		return libbpf_err(-EINVAL);
534 | |
535 | memset(&attr, 0, attr_sz); |
536 | attr.batch.map_fd = fd; |
	attr.batch.in_batch = ptr_to_u64(in_batch);
	attr.batch.out_batch = ptr_to_u64(out_batch);
	attr.batch.keys = ptr_to_u64(keys);
	attr.batch.values = ptr_to_u64(values);
541 | attr.batch.count = *count; |
542 | attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0); |
543 | attr.batch.flags = OPTS_GET(opts, flags, 0); |
544 | |
	ret = sys_bpf(cmd, &attr, attr_sz);
546 | *count = attr.batch.count; |
547 | |
548 | return libbpf_err_errno(ret); |
549 | } |
550 | |
551 | int bpf_map_delete_batch(int fd, const void *keys, __u32 *count, |
552 | const struct bpf_map_batch_opts *opts) |
553 | { |
	return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
				    NULL, (void *)keys, NULL, count, opts);
556 | } |
557 | |
558 | int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys, |
559 | void *values, __u32 *count, |
560 | const struct bpf_map_batch_opts *opts) |
561 | { |
	return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
563 | out_batch, keys, values, count, opts); |
564 | } |
565 | |
566 | int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch, |
567 | void *keys, void *values, __u32 *count, |
568 | const struct bpf_map_batch_opts *opts) |
569 | { |
	return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
571 | fd, in_batch, out_batch, keys, values, |
572 | count, opts); |
573 | } |
574 | |
575 | int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *count, |
576 | const struct bpf_map_batch_opts *opts) |
577 | { |
	return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
				    (void *)keys, (void *)values, count, opts);
580 | } |
581 | |
582 | int bpf_obj_pin_opts(int fd, const char *pathname, const struct bpf_obj_pin_opts *opts) |
583 | { |
584 | const size_t attr_sz = offsetofend(union bpf_attr, path_fd); |
585 | union bpf_attr attr; |
586 | int ret; |
587 | |
588 | if (!OPTS_VALID(opts, bpf_obj_pin_opts)) |
		return libbpf_err(-EINVAL);
590 | |
591 | memset(&attr, 0, attr_sz); |
592 | attr.path_fd = OPTS_GET(opts, path_fd, 0); |
	attr.pathname = ptr_to_u64((void *)pathname);
594 | attr.file_flags = OPTS_GET(opts, file_flags, 0); |
595 | attr.bpf_fd = fd; |
596 | |
	ret = sys_bpf(BPF_OBJ_PIN, &attr, attr_sz);
598 | return libbpf_err_errno(ret); |
599 | } |
600 | |
601 | int bpf_obj_pin(int fd, const char *pathname) |
602 | { |
603 | return bpf_obj_pin_opts(fd, pathname, NULL); |
604 | } |
605 | |
606 | int bpf_obj_get(const char *pathname) |
607 | { |
608 | return bpf_obj_get_opts(pathname, NULL); |
609 | } |
610 | |
611 | int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts) |
612 | { |
613 | const size_t attr_sz = offsetofend(union bpf_attr, path_fd); |
614 | union bpf_attr attr; |
615 | int fd; |
616 | |
617 | if (!OPTS_VALID(opts, bpf_obj_get_opts)) |
		return libbpf_err(-EINVAL);
619 | |
620 | memset(&attr, 0, attr_sz); |
621 | attr.path_fd = OPTS_GET(opts, path_fd, 0); |
	attr.pathname = ptr_to_u64((void *)pathname);
623 | attr.file_flags = OPTS_GET(opts, file_flags, 0); |
624 | |
	fd = sys_bpf_fd(BPF_OBJ_GET, &attr, attr_sz);
	return libbpf_err_errno(fd);
627 | } |
628 | |
629 | int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type, |
630 | unsigned int flags) |
631 | { |
632 | DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts, |
633 | .flags = flags, |
634 | ); |
635 | |
	return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts);
637 | } |
638 | |
639 | int bpf_prog_attach_opts(int prog_fd, int target, enum bpf_attach_type type, |
640 | const struct bpf_prog_attach_opts *opts) |
641 | { |
642 | const size_t attr_sz = offsetofend(union bpf_attr, expected_revision); |
643 | __u32 relative_id, flags; |
644 | int ret, relative_fd; |
645 | union bpf_attr attr; |
646 | |
647 | if (!OPTS_VALID(opts, bpf_prog_attach_opts)) |
		return libbpf_err(-EINVAL);
649 | |
650 | relative_id = OPTS_GET(opts, relative_id, 0); |
651 | relative_fd = OPTS_GET(opts, relative_fd, 0); |
652 | flags = OPTS_GET(opts, flags, 0); |
653 | |
654 | /* validate we don't have unexpected combinations of non-zero fields */ |
655 | if (relative_fd && relative_id) |
		return libbpf_err(-EINVAL);
657 | |
658 | memset(&attr, 0, attr_sz); |
659 | attr.target_fd = target; |
660 | attr.attach_bpf_fd = prog_fd; |
661 | attr.attach_type = type; |
662 | attr.replace_bpf_fd = OPTS_GET(opts, replace_fd, 0); |
663 | attr.expected_revision = OPTS_GET(opts, expected_revision, 0); |
664 | |
665 | if (relative_id) { |
666 | attr.attach_flags = flags | BPF_F_ID; |
667 | attr.relative_id = relative_id; |
668 | } else { |
669 | attr.attach_flags = flags; |
670 | attr.relative_fd = relative_fd; |
671 | } |
672 | |
	ret = sys_bpf(BPF_PROG_ATTACH, &attr, attr_sz);
674 | return libbpf_err_errno(ret); |
675 | } |
676 | |
677 | int bpf_prog_detach_opts(int prog_fd, int target, enum bpf_attach_type type, |
678 | const struct bpf_prog_detach_opts *opts) |
679 | { |
680 | const size_t attr_sz = offsetofend(union bpf_attr, expected_revision); |
681 | __u32 relative_id, flags; |
682 | int ret, relative_fd; |
683 | union bpf_attr attr; |
684 | |
685 | if (!OPTS_VALID(opts, bpf_prog_detach_opts)) |
		return libbpf_err(-EINVAL);
687 | |
688 | relative_id = OPTS_GET(opts, relative_id, 0); |
689 | relative_fd = OPTS_GET(opts, relative_fd, 0); |
690 | flags = OPTS_GET(opts, flags, 0); |
691 | |
692 | /* validate we don't have unexpected combinations of non-zero fields */ |
693 | if (relative_fd && relative_id) |
		return libbpf_err(-EINVAL);
695 | |
696 | memset(&attr, 0, attr_sz); |
697 | attr.target_fd = target; |
698 | attr.attach_bpf_fd = prog_fd; |
699 | attr.attach_type = type; |
700 | attr.expected_revision = OPTS_GET(opts, expected_revision, 0); |
701 | |
702 | if (relative_id) { |
703 | attr.attach_flags = flags | BPF_F_ID; |
704 | attr.relative_id = relative_id; |
705 | } else { |
706 | attr.attach_flags = flags; |
707 | attr.relative_fd = relative_fd; |
708 | } |
709 | |
	ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
711 | return libbpf_err_errno(ret); |
712 | } |
713 | |
714 | int bpf_prog_detach(int target_fd, enum bpf_attach_type type) |
715 | { |
	return bpf_prog_detach_opts(0, target_fd, type, NULL);
717 | } |
718 | |
719 | int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type) |
720 | { |
	return bpf_prog_detach_opts(prog_fd, target_fd, type, NULL);
722 | } |
723 | |
724 | int bpf_link_create(int prog_fd, int target_fd, |
725 | enum bpf_attach_type attach_type, |
726 | const struct bpf_link_create_opts *opts) |
727 | { |
728 | const size_t attr_sz = offsetofend(union bpf_attr, link_create); |
729 | __u32 target_btf_id, iter_info_len, relative_id; |
730 | int fd, err, relative_fd; |
731 | union bpf_attr attr; |
732 | |
733 | if (!OPTS_VALID(opts, bpf_link_create_opts)) |
		return libbpf_err(-EINVAL);
735 | |
736 | iter_info_len = OPTS_GET(opts, iter_info_len, 0); |
737 | target_btf_id = OPTS_GET(opts, target_btf_id, 0); |
738 | |
739 | /* validate we don't have unexpected combinations of non-zero fields */ |
740 | if (iter_info_len || target_btf_id) { |
741 | if (iter_info_len && target_btf_id) |
			return libbpf_err(-EINVAL);
		if (!OPTS_ZEROED(opts, target_btf_id))
			return libbpf_err(-EINVAL);
745 | } |
746 | |
747 | memset(&attr, 0, attr_sz); |
748 | attr.link_create.prog_fd = prog_fd; |
749 | attr.link_create.target_fd = target_fd; |
750 | attr.link_create.attach_type = attach_type; |
751 | attr.link_create.flags = OPTS_GET(opts, flags, 0); |
752 | |
753 | if (target_btf_id) { |
754 | attr.link_create.target_btf_id = target_btf_id; |
755 | goto proceed; |
756 | } |
757 | |
758 | switch (attach_type) { |
759 | case BPF_TRACE_ITER: |
760 | attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0)); |
761 | attr.link_create.iter_info_len = iter_info_len; |
762 | break; |
763 | case BPF_PERF_EVENT: |
764 | attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0); |
765 | if (!OPTS_ZEROED(opts, perf_event)) |
			return libbpf_err(-EINVAL);
767 | break; |
768 | case BPF_TRACE_KPROBE_MULTI: |
769 | attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0); |
770 | attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0); |
771 | attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0)); |
772 | attr.link_create.kprobe_multi.addrs = ptr_to_u64(OPTS_GET(opts, kprobe_multi.addrs, 0)); |
773 | attr.link_create.kprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, kprobe_multi.cookies, 0)); |
774 | if (!OPTS_ZEROED(opts, kprobe_multi)) |
			return libbpf_err(-EINVAL);
776 | break; |
777 | case BPF_TRACE_UPROBE_MULTI: |
778 | attr.link_create.uprobe_multi.flags = OPTS_GET(opts, uprobe_multi.flags, 0); |
779 | attr.link_create.uprobe_multi.cnt = OPTS_GET(opts, uprobe_multi.cnt, 0); |
780 | attr.link_create.uprobe_multi.path = ptr_to_u64(OPTS_GET(opts, uprobe_multi.path, 0)); |
781 | attr.link_create.uprobe_multi.offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.offsets, 0)); |
782 | attr.link_create.uprobe_multi.ref_ctr_offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.ref_ctr_offsets, 0)); |
783 | attr.link_create.uprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, uprobe_multi.cookies, 0)); |
784 | attr.link_create.uprobe_multi.pid = OPTS_GET(opts, uprobe_multi.pid, 0); |
785 | if (!OPTS_ZEROED(opts, uprobe_multi)) |
			return libbpf_err(-EINVAL);
787 | break; |
788 | case BPF_TRACE_FENTRY: |
789 | case BPF_TRACE_FEXIT: |
790 | case BPF_MODIFY_RETURN: |
791 | case BPF_LSM_MAC: |
792 | attr.link_create.tracing.cookie = OPTS_GET(opts, tracing.cookie, 0); |
793 | if (!OPTS_ZEROED(opts, tracing)) |
			return libbpf_err(-EINVAL);
795 | break; |
796 | case BPF_NETFILTER: |
797 | attr.link_create.netfilter.pf = OPTS_GET(opts, netfilter.pf, 0); |
798 | attr.link_create.netfilter.hooknum = OPTS_GET(opts, netfilter.hooknum, 0); |
799 | attr.link_create.netfilter.priority = OPTS_GET(opts, netfilter.priority, 0); |
800 | attr.link_create.netfilter.flags = OPTS_GET(opts, netfilter.flags, 0); |
801 | if (!OPTS_ZEROED(opts, netfilter)) |
			return libbpf_err(-EINVAL);
803 | break; |
804 | case BPF_TCX_INGRESS: |
805 | case BPF_TCX_EGRESS: |
806 | relative_fd = OPTS_GET(opts, tcx.relative_fd, 0); |
807 | relative_id = OPTS_GET(opts, tcx.relative_id, 0); |
808 | if (relative_fd && relative_id) |
			return libbpf_err(-EINVAL);
810 | if (relative_id) { |
811 | attr.link_create.tcx.relative_id = relative_id; |
812 | attr.link_create.flags |= BPF_F_ID; |
813 | } else { |
814 | attr.link_create.tcx.relative_fd = relative_fd; |
815 | } |
816 | attr.link_create.tcx.expected_revision = OPTS_GET(opts, tcx.expected_revision, 0); |
817 | if (!OPTS_ZEROED(opts, tcx)) |
			return libbpf_err(-EINVAL);
819 | break; |
820 | case BPF_NETKIT_PRIMARY: |
821 | case BPF_NETKIT_PEER: |
822 | relative_fd = OPTS_GET(opts, netkit.relative_fd, 0); |
823 | relative_id = OPTS_GET(opts, netkit.relative_id, 0); |
824 | if (relative_fd && relative_id) |
			return libbpf_err(-EINVAL);
826 | if (relative_id) { |
827 | attr.link_create.netkit.relative_id = relative_id; |
828 | attr.link_create.flags |= BPF_F_ID; |
829 | } else { |
830 | attr.link_create.netkit.relative_fd = relative_fd; |
831 | } |
832 | attr.link_create.netkit.expected_revision = OPTS_GET(opts, netkit.expected_revision, 0); |
833 | if (!OPTS_ZEROED(opts, netkit)) |
			return libbpf_err(-EINVAL);
835 | break; |
836 | default: |
837 | if (!OPTS_ZEROED(opts, flags)) |
			return libbpf_err(-EINVAL);
839 | break; |
840 | } |
841 | proceed: |
	fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, attr_sz);
843 | if (fd >= 0) |
844 | return fd; |
845 | /* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry |
846 | * and other similar programs |
847 | */ |
848 | err = -errno; |
849 | if (err != -EINVAL) |
		return libbpf_err(err);
851 | |
852 | /* if user used features not supported by |
853 | * BPF_RAW_TRACEPOINT_OPEN command, then just give up immediately |
854 | */ |
855 | if (attr.link_create.target_fd || attr.link_create.target_btf_id) |
		return libbpf_err(err);
	if (!OPTS_ZEROED(opts, sz))
		return libbpf_err(err);
859 | |
	/* otherwise, for a few select kinds of programs that can be
	 * attached using the BPF_RAW_TRACEPOINT_OPEN command, try that as
	 * a fallback for older kernels
	 */
864 | switch (attach_type) { |
865 | case BPF_TRACE_RAW_TP: |
866 | case BPF_LSM_MAC: |
867 | case BPF_TRACE_FENTRY: |
868 | case BPF_TRACE_FEXIT: |
869 | case BPF_MODIFY_RETURN: |
870 | return bpf_raw_tracepoint_open(NULL, prog_fd); |
871 | default: |
		return libbpf_err(err);
873 | } |
874 | } |
875 | |
876 | int bpf_link_detach(int link_fd) |
877 | { |
878 | const size_t attr_sz = offsetofend(union bpf_attr, link_detach); |
879 | union bpf_attr attr; |
880 | int ret; |
881 | |
882 | memset(&attr, 0, attr_sz); |
883 | attr.link_detach.link_fd = link_fd; |
884 | |
	ret = sys_bpf(BPF_LINK_DETACH, &attr, attr_sz);
886 | return libbpf_err_errno(ret); |
887 | } |
888 | |
889 | int bpf_link_update(int link_fd, int new_prog_fd, |
890 | const struct bpf_link_update_opts *opts) |
891 | { |
892 | const size_t attr_sz = offsetofend(union bpf_attr, link_update); |
893 | union bpf_attr attr; |
894 | int ret; |
895 | |
896 | if (!OPTS_VALID(opts, bpf_link_update_opts)) |
		return libbpf_err(-EINVAL);
898 | |
899 | if (OPTS_GET(opts, old_prog_fd, 0) && OPTS_GET(opts, old_map_fd, 0)) |
		return libbpf_err(-EINVAL);
901 | |
902 | memset(&attr, 0, attr_sz); |
903 | attr.link_update.link_fd = link_fd; |
904 | attr.link_update.new_prog_fd = new_prog_fd; |
905 | attr.link_update.flags = OPTS_GET(opts, flags, 0); |
906 | if (OPTS_GET(opts, old_prog_fd, 0)) |
907 | attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0); |
908 | else if (OPTS_GET(opts, old_map_fd, 0)) |
909 | attr.link_update.old_map_fd = OPTS_GET(opts, old_map_fd, 0); |
910 | |
	ret = sys_bpf(BPF_LINK_UPDATE, &attr, attr_sz);
912 | return libbpf_err_errno(ret); |
913 | } |
914 | |
915 | int bpf_iter_create(int link_fd) |
916 | { |
917 | const size_t attr_sz = offsetofend(union bpf_attr, iter_create); |
918 | union bpf_attr attr; |
919 | int fd; |
920 | |
921 | memset(&attr, 0, attr_sz); |
922 | attr.iter_create.link_fd = link_fd; |
923 | |
	fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
926 | } |
927 | |
928 | int bpf_prog_query_opts(int target, enum bpf_attach_type type, |
929 | struct bpf_prog_query_opts *opts) |
930 | { |
931 | const size_t attr_sz = offsetofend(union bpf_attr, query); |
932 | union bpf_attr attr; |
933 | int ret; |
934 | |
935 | if (!OPTS_VALID(opts, bpf_prog_query_opts)) |
		return libbpf_err(-EINVAL);
937 | |
938 | memset(&attr, 0, attr_sz); |
939 | attr.query.target_fd = target; |
940 | attr.query.attach_type = type; |
941 | attr.query.query_flags = OPTS_GET(opts, query_flags, 0); |
942 | attr.query.count = OPTS_GET(opts, count, 0); |
943 | attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL)); |
944 | attr.query.link_ids = ptr_to_u64(OPTS_GET(opts, link_ids, NULL)); |
945 | attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL)); |
946 | attr.query.link_attach_flags = ptr_to_u64(OPTS_GET(opts, link_attach_flags, NULL)); |
947 | |
	ret = sys_bpf(BPF_PROG_QUERY, &attr, attr_sz);
949 | |
950 | OPTS_SET(opts, attach_flags, attr.query.attach_flags); |
951 | OPTS_SET(opts, revision, attr.query.revision); |
952 | OPTS_SET(opts, count, attr.query.count); |
953 | |
954 | return libbpf_err_errno(ret); |
955 | } |
956 | |
957 | int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags, |
958 | __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt) |
959 | { |
960 | LIBBPF_OPTS(bpf_prog_query_opts, opts); |
961 | int ret; |
962 | |
963 | opts.query_flags = query_flags; |
964 | opts.prog_ids = prog_ids; |
965 | opts.prog_cnt = *prog_cnt; |
966 | |
	ret = bpf_prog_query_opts(target_fd, type, &opts);
968 | |
969 | if (attach_flags) |
970 | *attach_flags = opts.attach_flags; |
971 | *prog_cnt = opts.prog_cnt; |
972 | |
973 | return libbpf_err_errno(ret); |
974 | } |
975 | |
976 | int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts) |
977 | { |
978 | const size_t attr_sz = offsetofend(union bpf_attr, test); |
979 | union bpf_attr attr; |
980 | int ret; |
981 | |
982 | if (!OPTS_VALID(opts, bpf_test_run_opts)) |
		return libbpf_err(-EINVAL);
984 | |
985 | memset(&attr, 0, attr_sz); |
986 | attr.test.prog_fd = prog_fd; |
987 | attr.test.batch_size = OPTS_GET(opts, batch_size, 0); |
988 | attr.test.cpu = OPTS_GET(opts, cpu, 0); |
989 | attr.test.flags = OPTS_GET(opts, flags, 0); |
990 | attr.test.repeat = OPTS_GET(opts, repeat, 0); |
991 | attr.test.duration = OPTS_GET(opts, duration, 0); |
992 | attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0); |
993 | attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0); |
994 | attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0); |
995 | attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0); |
996 | attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL)); |
997 | attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL)); |
998 | attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL)); |
999 | attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL)); |
1000 | |
	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, attr_sz);
1002 | |
1003 | OPTS_SET(opts, data_size_out, attr.test.data_size_out); |
1004 | OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out); |
1005 | OPTS_SET(opts, duration, attr.test.duration); |
1006 | OPTS_SET(opts, retval, attr.test.retval); |
1007 | |
1008 | return libbpf_err_errno(ret); |
1009 | } |
1010 | |
1011 | static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd) |
1012 | { |
1013 | const size_t attr_sz = offsetofend(union bpf_attr, open_flags); |
1014 | union bpf_attr attr; |
1015 | int err; |
1016 | |
1017 | memset(&attr, 0, attr_sz); |
1018 | attr.start_id = start_id; |
1019 | |
	err = sys_bpf(cmd, &attr, attr_sz);
1021 | if (!err) |
1022 | *next_id = attr.next_id; |
1023 | |
	return libbpf_err_errno(err);
1025 | } |
1026 | |
1027 | int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id) |
1028 | { |
	return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
1030 | } |
1031 | |
1032 | int bpf_map_get_next_id(__u32 start_id, __u32 *next_id) |
1033 | { |
	return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
1035 | } |
1036 | |
1037 | int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id) |
1038 | { |
	return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
1040 | } |
1041 | |
1042 | int bpf_link_get_next_id(__u32 start_id, __u32 *next_id) |
1043 | { |
	return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
1045 | } |
1046 | |
1047 | int bpf_prog_get_fd_by_id_opts(__u32 id, |
1048 | const struct bpf_get_fd_by_id_opts *opts) |
1049 | { |
1050 | const size_t attr_sz = offsetofend(union bpf_attr, open_flags); |
1051 | union bpf_attr attr; |
1052 | int fd; |
1053 | |
1054 | if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts)) |
		return libbpf_err(-EINVAL);
1056 | |
1057 | memset(&attr, 0, attr_sz); |
1058 | attr.prog_id = id; |
1059 | attr.open_flags = OPTS_GET(opts, open_flags, 0); |
1060 | |
	fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
1063 | } |
1064 | |
1065 | int bpf_prog_get_fd_by_id(__u32 id) |
1066 | { |
1067 | return bpf_prog_get_fd_by_id_opts(id, NULL); |
1068 | } |
1069 | |
1070 | int bpf_map_get_fd_by_id_opts(__u32 id, |
1071 | const struct bpf_get_fd_by_id_opts *opts) |
1072 | { |
1073 | const size_t attr_sz = offsetofend(union bpf_attr, open_flags); |
1074 | union bpf_attr attr; |
1075 | int fd; |
1076 | |
1077 | if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts)) |
		return libbpf_err(-EINVAL);
1079 | |
1080 | memset(&attr, 0, attr_sz); |
1081 | attr.map_id = id; |
1082 | attr.open_flags = OPTS_GET(opts, open_flags, 0); |
1083 | |
	fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
1086 | } |
1087 | |
1088 | int bpf_map_get_fd_by_id(__u32 id) |
1089 | { |
1090 | return bpf_map_get_fd_by_id_opts(id, NULL); |
1091 | } |
1092 | |
1093 | int bpf_btf_get_fd_by_id_opts(__u32 id, |
1094 | const struct bpf_get_fd_by_id_opts *opts) |
1095 | { |
1096 | const size_t attr_sz = offsetofend(union bpf_attr, open_flags); |
1097 | union bpf_attr attr; |
1098 | int fd; |
1099 | |
1100 | if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts)) |
		return libbpf_err(-EINVAL);
1102 | |
1103 | memset(&attr, 0, attr_sz); |
1104 | attr.btf_id = id; |
1105 | attr.open_flags = OPTS_GET(opts, open_flags, 0); |
1106 | |
	fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
1109 | } |
1110 | |
1111 | int bpf_btf_get_fd_by_id(__u32 id) |
1112 | { |
1113 | return bpf_btf_get_fd_by_id_opts(id, NULL); |
1114 | } |
1115 | |
1116 | int bpf_link_get_fd_by_id_opts(__u32 id, |
1117 | const struct bpf_get_fd_by_id_opts *opts) |
1118 | { |
1119 | const size_t attr_sz = offsetofend(union bpf_attr, open_flags); |
1120 | union bpf_attr attr; |
1121 | int fd; |
1122 | |
1123 | if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts)) |
		return libbpf_err(-EINVAL);
1125 | |
1126 | memset(&attr, 0, attr_sz); |
1127 | attr.link_id = id; |
1128 | attr.open_flags = OPTS_GET(opts, open_flags, 0); |
1129 | |
	fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
1132 | } |
1133 | |
1134 | int bpf_link_get_fd_by_id(__u32 id) |
1135 | { |
1136 | return bpf_link_get_fd_by_id_opts(id, NULL); |
1137 | } |
1138 | |
1139 | int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len) |
1140 | { |
1141 | const size_t attr_sz = offsetofend(union bpf_attr, info); |
1142 | union bpf_attr attr; |
1143 | int err; |
1144 | |
1145 | memset(&attr, 0, attr_sz); |
1146 | attr.info.bpf_fd = bpf_fd; |
1147 | attr.info.info_len = *info_len; |
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, attr_sz);
	if (!err)
		*info_len = attr.info.info_len;
	return libbpf_err_errno(err);
1154 | } |
1155 | |
1156 | int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len) |
1157 | { |
	return bpf_obj_get_info_by_fd(prog_fd, info, info_len);
1159 | } |
1160 | |
1161 | int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len) |
1162 | { |
	return bpf_obj_get_info_by_fd(map_fd, info, info_len);
1164 | } |
1165 | |
1166 | int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len) |
1167 | { |
	return bpf_obj_get_info_by_fd(btf_fd, info, info_len);
1169 | } |
1170 | |
1171 | int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len) |
1172 | { |
	return bpf_obj_get_info_by_fd(link_fd, info, info_len);
1174 | } |
1175 | |
1176 | int bpf_raw_tracepoint_open(const char *name, int prog_fd) |
1177 | { |
1178 | const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint); |
1179 | union bpf_attr attr; |
1180 | int fd; |
1181 | |
1182 | memset(&attr, 0, attr_sz); |
	attr.raw_tracepoint.name = ptr_to_u64(name);
1184 | attr.raw_tracepoint.prog_fd = prog_fd; |
1185 | |
	fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
	return libbpf_err_errno(fd);
1188 | } |
1189 | |
1190 | int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts) |
1191 | { |
1192 | const size_t attr_sz = offsetofend(union bpf_attr, btf_token_fd); |
1193 | union bpf_attr attr; |
1194 | char *log_buf; |
1195 | size_t log_size; |
1196 | __u32 log_level; |
1197 | int fd; |
1198 | |
1199 | bump_rlimit_memlock(); |
1200 | |
1201 | memset(&attr, 0, attr_sz); |
1202 | |
1203 | if (!OPTS_VALID(opts, bpf_btf_load_opts)) |
		return libbpf_err(-EINVAL);
1205 | |
1206 | log_buf = OPTS_GET(opts, log_buf, NULL); |
1207 | log_size = OPTS_GET(opts, log_size, 0); |
1208 | log_level = OPTS_GET(opts, log_level, 0); |
1209 | |
1210 | if (log_size > UINT_MAX) |
		return libbpf_err(-EINVAL);
	if (log_size && !log_buf)
		return libbpf_err(-EINVAL);

	attr.btf = ptr_to_u64(btf_data);
1216 | attr.btf_size = btf_size; |
1217 | |
1218 | attr.btf_flags = OPTS_GET(opts, btf_flags, 0); |
1219 | attr.btf_token_fd = OPTS_GET(opts, token_fd, 0); |
1220 | |
1221 | /* log_level == 0 and log_buf != NULL means "try loading without |
1222 | * log_buf, but retry with log_buf and log_level=1 on error", which is |
1223 | * consistent across low-level and high-level BTF and program loading |
1224 | * APIs within libbpf and provides a sensible behavior in practice |
1225 | */ |
1226 | if (log_level) { |
		attr.btf_log_buf = ptr_to_u64(log_buf);
1228 | attr.btf_log_size = (__u32)log_size; |
1229 | attr.btf_log_level = log_level; |
1230 | } |
1231 | |
	fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
	if (fd < 0 && log_buf && log_level == 0) {
		attr.btf_log_buf = ptr_to_u64(log_buf);
		attr.btf_log_size = (__u32)log_size;
		attr.btf_log_level = 1;
		fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
1238 | } |
1239 | |
1240 | OPTS_SET(opts, log_true_size, attr.btf_log_true_size); |
	return libbpf_err_errno(fd);
1242 | } |
1243 | |
1244 | int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len, |
1245 | __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset, |
1246 | __u64 *probe_addr) |
1247 | { |
1248 | const size_t attr_sz = offsetofend(union bpf_attr, task_fd_query); |
1249 | union bpf_attr attr; |
1250 | int err; |
1251 | |
1252 | memset(&attr, 0, attr_sz); |
1253 | attr.task_fd_query.pid = pid; |
1254 | attr.task_fd_query.fd = fd; |
1255 | attr.task_fd_query.flags = flags; |
	attr.task_fd_query.buf = ptr_to_u64(buf);
1257 | attr.task_fd_query.buf_len = *buf_len; |
1258 | |
	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, attr_sz);
1260 | |
1261 | *buf_len = attr.task_fd_query.buf_len; |
1262 | *prog_id = attr.task_fd_query.prog_id; |
1263 | *fd_type = attr.task_fd_query.fd_type; |
1264 | *probe_offset = attr.task_fd_query.probe_offset; |
1265 | *probe_addr = attr.task_fd_query.probe_addr; |
1266 | |
	return libbpf_err_errno(err);
1268 | } |
1269 | |
1270 | int bpf_enable_stats(enum bpf_stats_type type) |
1271 | { |
1272 | const size_t attr_sz = offsetofend(union bpf_attr, enable_stats); |
1273 | union bpf_attr attr; |
1274 | int fd; |
1275 | |
1276 | memset(&attr, 0, attr_sz); |
1277 | attr.enable_stats.type = type; |
1278 | |
	fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, attr_sz);
	return libbpf_err_errno(fd);
1281 | } |
1282 | |
1283 | int bpf_prog_bind_map(int prog_fd, int map_fd, |
1284 | const struct bpf_prog_bind_opts *opts) |
1285 | { |
1286 | const size_t attr_sz = offsetofend(union bpf_attr, prog_bind_map); |
1287 | union bpf_attr attr; |
1288 | int ret; |
1289 | |
1290 | if (!OPTS_VALID(opts, bpf_prog_bind_opts)) |
		return libbpf_err(-EINVAL);
1292 | |
1293 | memset(&attr, 0, attr_sz); |
1294 | attr.prog_bind_map.prog_fd = prog_fd; |
1295 | attr.prog_bind_map.map_fd = map_fd; |
1296 | attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0); |
1297 | |
	ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz);
1299 | return libbpf_err_errno(ret); |
1300 | } |
1301 | |
1302 | int bpf_token_create(int bpffs_fd, struct bpf_token_create_opts *opts) |
1303 | { |
1304 | const size_t attr_sz = offsetofend(union bpf_attr, token_create); |
1305 | union bpf_attr attr; |
1306 | int fd; |
1307 | |
1308 | if (!OPTS_VALID(opts, bpf_token_create_opts)) |
		return libbpf_err(-EINVAL);
1310 | |
1311 | memset(&attr, 0, attr_sz); |
1312 | attr.token_create.bpffs_fd = bpffs_fd; |
1313 | attr.token_create.flags = OPTS_GET(opts, flags, 0); |
1314 | |
	fd = sys_bpf_fd(BPF_TOKEN_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
1317 | } |
1318 | |