1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ |
3 | #include <linux/capability.h> |
4 | #include <stdlib.h> |
5 | #include <test_progs.h> |
6 | #include <bpf/btf.h> |
7 | |
8 | #include "autoconf_helper.h" |
9 | #include "unpriv_helpers.h" |
10 | #include "cap_helpers.h" |
11 | |
/* Prefix match; for string-literal prefixes the length is a compile-time
 * constant, otherwise strlen() is evaluated at runtime.
 */
#define str_has_pfx(str, pfx) \
	(strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)

/* Size of the buffer used to capture verifier logs (2 MiB). */
#define TEST_LOADER_LOG_BUF_SZ 2097152

/* btf_decl_tag strings produced by the annotation macros in bpf_misc.h;
 * parse_test_spec() below matches on these to build a struct test_spec.
 */
#define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure"
#define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success"
#define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg="
#define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv"
#define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv"
#define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv="
#define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level="
#define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags="
#define TEST_TAG_DESCRIPTION_PFX "comment:test_description="
#define TEST_TAG_RETVAL_PFX "comment:test_retval="
#define TEST_TAG_RETVAL_PFX_UNPRIV "comment:test_retval_unpriv="
#define TEST_TAG_AUXILIARY "comment:test_auxiliary"
#define TEST_TAG_AUXILIARY_UNPRIV "comment:test_auxiliary_unpriv"
#define TEST_BTF_PATH "comment:test_btf_path="

/* Warning: duplicated in bpf_misc.h */
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64

#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define EFFICIENT_UNALIGNED_ACCESS 1
#else
#define EFFICIENT_UNALIGNED_ACCESS 0
#endif

/* Cached value of the "unprivileged BPF disabled" sysctl;
 * -1 means "not queried yet", see can_execute_unpriv().
 */
static int sysctl_unpriv_disabled = -1;

/* Bitmask of the modes a test case runs in. */
enum mode {
	PRIV = 1,	/* run with full capabilities */
	UNPRIV = 2	/* run with BPF-related capabilities dropped */
};

/* Per-mode (priv or unpriv) expectations for one test program. */
struct test_subspec {
	char *name;			/* subtest name shown by test_progs; owned, freed in free_test_spec() */
	bool expect_failure;		/* should bpf_object__load() fail? */
	const char **expect_msgs;	/* ordered substrings expected in verifier log; array owned */
	size_t expect_msg_cnt;
	int retval;			/* expected BPF_PROG_TEST_RUN return value */
	bool execute;			/* run the program after a successful load */
};

/* Full test specification parsed from a program's btf_decl_tag annotations. */
struct test_spec {
	const char *prog_name;
	struct test_subspec priv;
	struct test_subspec unpriv;
	const char *btf_custom_path;	/* custom BTF to load instead of kernel BTF */
	int log_level;
	int prog_flags;
	int mode_mask;			/* combination of PRIV / UNPRIV */
	bool auxiliary;			/* loaded alongside other tests, never run standalone */
	bool valid;			/* spec was parsed successfully */
};
69 | |
70 | static int tester_init(struct test_loader *tester) |
71 | { |
72 | if (!tester->log_buf) { |
73 | tester->log_buf_sz = TEST_LOADER_LOG_BUF_SZ; |
74 | tester->log_buf = calloc(tester->log_buf_sz, 1); |
75 | if (!ASSERT_OK_PTR(tester->log_buf, "tester_log_buf" )) |
76 | return -ENOMEM; |
77 | } |
78 | |
79 | return 0; |
80 | } |
81 | |
82 | void test_loader_fini(struct test_loader *tester) |
83 | { |
84 | if (!tester) |
85 | return; |
86 | |
87 | free(tester->log_buf); |
88 | } |
89 | |
90 | static void free_test_spec(struct test_spec *spec) |
91 | { |
92 | free(spec->priv.name); |
93 | free(spec->unpriv.name); |
94 | free(spec->priv.expect_msgs); |
95 | free(spec->unpriv.expect_msgs); |
96 | |
97 | spec->priv.name = NULL; |
98 | spec->unpriv.name = NULL; |
99 | spec->priv.expect_msgs = NULL; |
100 | spec->unpriv.expect_msgs = NULL; |
101 | } |
102 | |
103 | static int push_msg(const char *msg, struct test_subspec *subspec) |
104 | { |
105 | void *tmp; |
106 | |
107 | tmp = realloc(subspec->expect_msgs, (1 + subspec->expect_msg_cnt) * sizeof(void *)); |
108 | if (!tmp) { |
109 | ASSERT_FAIL("failed to realloc memory for messages\n" ); |
110 | return -ENOMEM; |
111 | } |
112 | subspec->expect_msgs = tmp; |
113 | subspec->expect_msgs[subspec->expect_msg_cnt++] = msg; |
114 | |
115 | return 0; |
116 | } |
117 | |
/* Parse a decimal or "0x"-prefixed hexadecimal integer from @str into @val.
 * @name is only used in the failure message.
 * Returns 0 on success, -EINVAL on malformed or out-of-range input.
 */
static int parse_int(const char *str, int *val, const char *name)
{
	const char *num = str;
	int base = 10;
	char *end;
	long tmp;

	if (str_has_pfx(str, "0x")) {
		num = str + 2;
		base = 16;
	}

	errno = 0;
	tmp = strtol(num, &end, base);
	/* Reject inputs with no digits (e.g. "" or bare "0x") or trailing
	 * garbage, strtol() range errors, and values that the long -> int
	 * assignment below would otherwise silently truncate on LP64.
	 */
	if (errno || end == num || end[0] != '\0' || tmp < INT_MIN || tmp > INT_MAX) {
		PRINT_FAIL("failed to parse %s from '%s'\n", name, str);
		return -EINVAL;
	}
	*val = tmp;
	return 0;
}
135 | |
136 | static int parse_retval(const char *str, int *val, const char *name) |
137 | { |
138 | struct { |
139 | char *name; |
140 | int val; |
141 | } named_values[] = { |
142 | { "INT_MIN" , INT_MIN }, |
143 | { "POINTER_VALUE" , POINTER_VALUE }, |
144 | { "TEST_DATA_LEN" , TEST_DATA_LEN }, |
145 | }; |
146 | int i; |
147 | |
148 | for (i = 0; i < ARRAY_SIZE(named_values); ++i) { |
149 | if (strcmp(str, named_values[i].name) != 0) |
150 | continue; |
151 | *val = named_values[i].val; |
152 | return 0; |
153 | } |
154 | |
155 | return parse_int(str, val, name); |
156 | } |
157 | |
/* Set @flag in @flags, or clear it when @clear is true. */
static void update_flags(int *flags, int flag, bool clear)
{
	*flags = clear ? (*flags & ~flag) : (*flags | flag);
}
165 | |
166 | /* Uses btf_decl_tag attributes to describe the expected test |
167 | * behavior, see bpf_misc.h for detailed description of each attribute |
168 | * and attribute combinations. |
169 | */ |
170 | static int parse_test_spec(struct test_loader *tester, |
171 | struct bpf_object *obj, |
172 | struct bpf_program *prog, |
173 | struct test_spec *spec) |
174 | { |
175 | const char *description = NULL; |
176 | bool has_unpriv_result = false; |
177 | bool has_unpriv_retval = false; |
178 | int func_id, i, err = 0; |
179 | struct btf *btf; |
180 | |
181 | memset(spec, 0, sizeof(*spec)); |
182 | |
183 | spec->prog_name = bpf_program__name(prog); |
184 | spec->prog_flags = testing_prog_flags(); |
185 | |
186 | btf = bpf_object__btf(obj); |
187 | if (!btf) { |
188 | ASSERT_FAIL("BPF object has no BTF" ); |
189 | return -EINVAL; |
190 | } |
191 | |
192 | func_id = btf__find_by_name_kind(btf, spec->prog_name, BTF_KIND_FUNC); |
193 | if (func_id < 0) { |
194 | ASSERT_FAIL("failed to find FUNC BTF type for '%s'" , spec->prog_name); |
195 | return -EINVAL; |
196 | } |
197 | |
198 | for (i = 1; i < btf__type_cnt(btf); i++) { |
199 | const char *s, *val, *msg; |
200 | const struct btf_type *t; |
201 | bool clear; |
202 | int flags; |
203 | |
204 | t = btf__type_by_id(btf, i); |
205 | if (!btf_is_decl_tag(t)) |
206 | continue; |
207 | |
208 | if (t->type != func_id || btf_decl_tag(t)->component_idx != -1) |
209 | continue; |
210 | |
211 | s = btf__str_by_offset(btf, t->name_off); |
212 | if (str_has_pfx(s, TEST_TAG_DESCRIPTION_PFX)) { |
213 | description = s + sizeof(TEST_TAG_DESCRIPTION_PFX) - 1; |
214 | } else if (strcmp(s, TEST_TAG_EXPECT_FAILURE) == 0) { |
215 | spec->priv.expect_failure = true; |
216 | spec->mode_mask |= PRIV; |
217 | } else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS) == 0) { |
218 | spec->priv.expect_failure = false; |
219 | spec->mode_mask |= PRIV; |
220 | } else if (strcmp(s, TEST_TAG_EXPECT_FAILURE_UNPRIV) == 0) { |
221 | spec->unpriv.expect_failure = true; |
222 | spec->mode_mask |= UNPRIV; |
223 | has_unpriv_result = true; |
224 | } else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS_UNPRIV) == 0) { |
225 | spec->unpriv.expect_failure = false; |
226 | spec->mode_mask |= UNPRIV; |
227 | has_unpriv_result = true; |
228 | } else if (strcmp(s, TEST_TAG_AUXILIARY) == 0) { |
229 | spec->auxiliary = true; |
230 | spec->mode_mask |= PRIV; |
231 | } else if (strcmp(s, TEST_TAG_AUXILIARY_UNPRIV) == 0) { |
232 | spec->auxiliary = true; |
233 | spec->mode_mask |= UNPRIV; |
234 | } else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) { |
235 | msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1; |
236 | err = push_msg(msg, subspec: &spec->priv); |
237 | if (err) |
238 | goto cleanup; |
239 | spec->mode_mask |= PRIV; |
240 | } else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV)) { |
241 | msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX_UNPRIV) - 1; |
242 | err = push_msg(msg, subspec: &spec->unpriv); |
243 | if (err) |
244 | goto cleanup; |
245 | spec->mode_mask |= UNPRIV; |
246 | } else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX)) { |
247 | val = s + sizeof(TEST_TAG_RETVAL_PFX) - 1; |
248 | err = parse_retval(str: val, val: &spec->priv.retval, name: "__retval" ); |
249 | if (err) |
250 | goto cleanup; |
251 | spec->priv.execute = true; |
252 | spec->mode_mask |= PRIV; |
253 | } else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX_UNPRIV)) { |
254 | val = s + sizeof(TEST_TAG_RETVAL_PFX_UNPRIV) - 1; |
255 | err = parse_retval(str: val, val: &spec->unpriv.retval, name: "__retval_unpriv" ); |
256 | if (err) |
257 | goto cleanup; |
258 | spec->mode_mask |= UNPRIV; |
259 | spec->unpriv.execute = true; |
260 | has_unpriv_retval = true; |
261 | } else if (str_has_pfx(s, TEST_TAG_LOG_LEVEL_PFX)) { |
262 | val = s + sizeof(TEST_TAG_LOG_LEVEL_PFX) - 1; |
263 | err = parse_int(str: val, val: &spec->log_level, name: "test log level" ); |
264 | if (err) |
265 | goto cleanup; |
266 | } else if (str_has_pfx(s, TEST_TAG_PROG_FLAGS_PFX)) { |
267 | val = s + sizeof(TEST_TAG_PROG_FLAGS_PFX) - 1; |
268 | |
269 | clear = val[0] == '!'; |
270 | if (clear) |
271 | val++; |
272 | |
273 | if (strcmp(val, "BPF_F_STRICT_ALIGNMENT" ) == 0) { |
274 | update_flags(flags: &spec->prog_flags, BPF_F_STRICT_ALIGNMENT, clear); |
275 | } else if (strcmp(val, "BPF_F_ANY_ALIGNMENT" ) == 0) { |
276 | update_flags(flags: &spec->prog_flags, BPF_F_ANY_ALIGNMENT, clear); |
277 | } else if (strcmp(val, "BPF_F_TEST_RND_HI32" ) == 0) { |
278 | update_flags(flags: &spec->prog_flags, BPF_F_TEST_RND_HI32, clear); |
279 | } else if (strcmp(val, "BPF_F_TEST_STATE_FREQ" ) == 0) { |
280 | update_flags(flags: &spec->prog_flags, BPF_F_TEST_STATE_FREQ, clear); |
281 | } else if (strcmp(val, "BPF_F_SLEEPABLE" ) == 0) { |
282 | update_flags(flags: &spec->prog_flags, BPF_F_SLEEPABLE, clear); |
283 | } else if (strcmp(val, "BPF_F_XDP_HAS_FRAGS" ) == 0) { |
284 | update_flags(flags: &spec->prog_flags, BPF_F_XDP_HAS_FRAGS, clear); |
285 | } else if (strcmp(val, "BPF_F_TEST_REG_INVARIANTS" ) == 0) { |
286 | update_flags(flags: &spec->prog_flags, BPF_F_TEST_REG_INVARIANTS, clear); |
287 | } else /* assume numeric value */ { |
288 | err = parse_int(str: val, val: &flags, name: "test prog flags" ); |
289 | if (err) |
290 | goto cleanup; |
291 | update_flags(flags: &spec->prog_flags, flag: flags, clear); |
292 | } |
293 | } else if (str_has_pfx(s, TEST_BTF_PATH)) { |
294 | spec->btf_custom_path = s + sizeof(TEST_BTF_PATH) - 1; |
295 | } |
296 | } |
297 | |
298 | if (spec->mode_mask == 0) |
299 | spec->mode_mask = PRIV; |
300 | |
301 | if (!description) |
302 | description = spec->prog_name; |
303 | |
304 | if (spec->mode_mask & PRIV) { |
305 | spec->priv.name = strdup(description); |
306 | if (!spec->priv.name) { |
307 | PRINT_FAIL("failed to allocate memory for priv.name\n" ); |
308 | err = -ENOMEM; |
309 | goto cleanup; |
310 | } |
311 | } |
312 | |
313 | if (spec->mode_mask & UNPRIV) { |
314 | int descr_len = strlen(description); |
315 | const char *suffix = " @unpriv" ; |
316 | char *name; |
317 | |
318 | name = malloc(descr_len + strlen(suffix) + 1); |
319 | if (!name) { |
320 | PRINT_FAIL("failed to allocate memory for unpriv.name\n" ); |
321 | err = -ENOMEM; |
322 | goto cleanup; |
323 | } |
324 | |
325 | strcpy(p: name, q: description); |
326 | strcpy(p: &name[descr_len], q: suffix); |
327 | spec->unpriv.name = name; |
328 | } |
329 | |
330 | if (spec->mode_mask & (PRIV | UNPRIV)) { |
331 | if (!has_unpriv_result) |
332 | spec->unpriv.expect_failure = spec->priv.expect_failure; |
333 | |
334 | if (!has_unpriv_retval) { |
335 | spec->unpriv.retval = spec->priv.retval; |
336 | spec->unpriv.execute = spec->priv.execute; |
337 | } |
338 | |
339 | if (!spec->unpriv.expect_msgs) { |
340 | size_t sz = spec->priv.expect_msg_cnt * sizeof(void *); |
341 | |
342 | spec->unpriv.expect_msgs = malloc(sz); |
343 | if (!spec->unpriv.expect_msgs) { |
344 | PRINT_FAIL("failed to allocate memory for unpriv.expect_msgs\n" ); |
345 | err = -ENOMEM; |
346 | goto cleanup; |
347 | } |
348 | memcpy(spec->unpriv.expect_msgs, spec->priv.expect_msgs, sz); |
349 | spec->unpriv.expect_msg_cnt = spec->priv.expect_msg_cnt; |
350 | } |
351 | } |
352 | |
353 | spec->valid = true; |
354 | |
355 | return 0; |
356 | |
357 | cleanup: |
358 | free_test_spec(spec); |
359 | return err; |
360 | } |
361 | |
362 | static void prepare_case(struct test_loader *tester, |
363 | struct test_spec *spec, |
364 | struct bpf_object *obj, |
365 | struct bpf_program *prog) |
366 | { |
367 | int min_log_level = 0, prog_flags; |
368 | |
369 | if (env.verbosity > VERBOSE_NONE) |
370 | min_log_level = 1; |
371 | if (env.verbosity > VERBOSE_VERY) |
372 | min_log_level = 2; |
373 | |
374 | bpf_program__set_log_buf(prog, tester->log_buf, tester->log_buf_sz); |
375 | |
376 | /* Make sure we set at least minimal log level, unless test requires |
377 | * even higher level already. Make sure to preserve independent log |
378 | * level 4 (verifier stats), though. |
379 | */ |
380 | if ((spec->log_level & 3) < min_log_level) |
381 | bpf_program__set_log_level(prog, (spec->log_level & 4) | min_log_level); |
382 | else |
383 | bpf_program__set_log_level(prog, spec->log_level); |
384 | |
385 | prog_flags = bpf_program__flags(prog); |
386 | bpf_program__set_flags(prog, prog_flags | spec->prog_flags); |
387 | |
388 | tester->log_buf[0] = '\0'; |
389 | tester->next_match_pos = 0; |
390 | } |
391 | |
392 | static void emit_verifier_log(const char *log_buf, bool force) |
393 | { |
394 | if (!force && env.verbosity == VERBOSE_NONE) |
395 | return; |
396 | fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n" , log_buf); |
397 | } |
398 | |
/* Verify that each of the subtest's expected messages appears in the
 * verifier log, in order: every message must match at or after the end of
 * the previous match (tester->next_match_pos advances past each hit).
 * On the first miss, dump the log (if not already emitted) plus the
 * match history and bail out.
 * @obj, @prog and @load_err are currently unused here.
 */
static void validate_case(struct test_loader *tester,
			  struct test_subspec *subspec,
			  struct bpf_object *obj,
			  struct bpf_program *prog,
			  int load_err)
{
	int i, j;

	for (i = 0; i < subspec->expect_msg_cnt; i++) {
		char *match;
		const char *expect_msg;

		expect_msg = subspec->expect_msgs[i];

		/* search only past the previous match to enforce ordering */
		match = strstr(tester->log_buf + tester->next_match_pos, expect_msg);
		if (!ASSERT_OK_PTR(match, "expect_msg")) {
			/* if we are in verbose mode, we've already emitted log */
			if (env.verbosity == VERBOSE_NONE)
				emit_verifier_log(tester->log_buf, true /*force*/);
			for (j = 0; j < i; j++)
				fprintf(stderr,
					"MATCHED MSG: '%s'\n", subspec->expect_msgs[j]);
			fprintf(stderr, "EXPECTED MSG: '%s'\n", expect_msg);
			return;
		}

		tester->next_match_pos = match - tester->log_buf + strlen(expect_msg);
	}
}
428 | |
/* Saved capability set so dropped capabilities can be restored later. */
struct cap_state {
	__u64 old_caps;		/* effective caps before drop_capabilities() */
	bool initialized;	/* true once old_caps holds a valid snapshot */
};
433 | |
434 | static int drop_capabilities(struct cap_state *caps) |
435 | { |
436 | const __u64 caps_to_drop = (1ULL << CAP_SYS_ADMIN | 1ULL << CAP_NET_ADMIN | |
437 | 1ULL << CAP_PERFMON | 1ULL << CAP_BPF); |
438 | int err; |
439 | |
440 | err = cap_disable_effective(caps: caps_to_drop, old_caps: &caps->old_caps); |
441 | if (err) { |
442 | PRINT_FAIL("failed to drop capabilities: %i, %s\n" , err, strerror(err)); |
443 | return err; |
444 | } |
445 | |
446 | caps->initialized = true; |
447 | return 0; |
448 | } |
449 | |
450 | static int restore_capabilities(struct cap_state *caps) |
451 | { |
452 | int err; |
453 | |
454 | if (!caps->initialized) |
455 | return 0; |
456 | |
457 | err = cap_enable_effective(caps: caps->old_caps, NULL); |
458 | if (err) |
459 | PRINT_FAIL("failed to restore capabilities: %i, %s\n" , err, strerror(err)); |
460 | caps->initialized = false; |
461 | return err; |
462 | } |
463 | |
464 | static bool can_execute_unpriv(struct test_loader *tester, struct test_spec *spec) |
465 | { |
466 | if (sysctl_unpriv_disabled < 0) |
467 | sysctl_unpriv_disabled = get_unpriv_disabled() ? 1 : 0; |
468 | if (sysctl_unpriv_disabled) |
469 | return false; |
470 | if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS) |
471 | return false; |
472 | return true; |
473 | } |
474 | |
475 | static bool is_unpriv_capable_map(struct bpf_map *map) |
476 | { |
477 | enum bpf_map_type type; |
478 | __u32 flags; |
479 | |
480 | type = bpf_map__type(map); |
481 | |
482 | switch (type) { |
483 | case BPF_MAP_TYPE_HASH: |
484 | case BPF_MAP_TYPE_PERCPU_HASH: |
485 | case BPF_MAP_TYPE_HASH_OF_MAPS: |
486 | flags = bpf_map__map_flags(map); |
487 | return !(flags & BPF_F_ZERO_SEED); |
488 | case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: |
489 | case BPF_MAP_TYPE_ARRAY: |
490 | case BPF_MAP_TYPE_RINGBUF: |
491 | case BPF_MAP_TYPE_PROG_ARRAY: |
492 | case BPF_MAP_TYPE_CGROUP_ARRAY: |
493 | case BPF_MAP_TYPE_PERCPU_ARRAY: |
494 | case BPF_MAP_TYPE_USER_RINGBUF: |
495 | case BPF_MAP_TYPE_ARRAY_OF_MAPS: |
496 | case BPF_MAP_TYPE_CGROUP_STORAGE: |
497 | case BPF_MAP_TYPE_PERF_EVENT_ARRAY: |
498 | return true; |
499 | default: |
500 | return false; |
501 | } |
502 | } |
503 | |
/* Execute the loaded program @fd_prog via BPF_PROG_TEST_RUN and store its
 * return value in @retval. When @empty_opts is set (callers pass true for
 * BPF_PROG_TYPE_SYSCALL programs -- presumably those reject the data
 * buffers; confirm against kernel test-run semantics), all options except
 * the mandatory sz field are zeroed. Returns 0 on success, the
 * bpf_prog_test_run_opts() error otherwise.
 */
static int do_prog_test_run(int fd_prog, int *retval, bool empty_opts)
{
	__u8 tmp_out[TEST_DATA_LEN << 2] = {};
	__u8 tmp_in[TEST_DATA_LEN] = {};
	int err, saved_errno;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = tmp_in,
		    .data_size_in = sizeof(tmp_in),
		    .data_out = tmp_out,
		    .data_size_out = sizeof(tmp_out),
		    .repeat = 1,
	);

	if (empty_opts) {
		memset(&topts, 0, sizeof(struct bpf_test_run_opts));
		topts.sz = sizeof(struct bpf_test_run_opts);
	}
	err = bpf_prog_test_run_opts(fd_prog, &topts);
	/* capture errno before any later call can clobber it */
	saved_errno = errno;

	if (err) {
		PRINT_FAIL("FAIL: Unexpected bpf_prog_test_run error: %d (%s) ",
			   saved_errno, strerror(saved_errno));
		return err;
	}

	/* record a passing subtest step in the test_progs log */
	ASSERT_OK(0, "bpf_prog_test_run");
	*retval = topts.retval;

	return 0;
}
535 | |
536 | static bool should_do_test_run(struct test_spec *spec, struct test_subspec *subspec) |
537 | { |
538 | if (!subspec->execute) |
539 | return false; |
540 | |
541 | if (subspec->expect_failure) |
542 | return false; |
543 | |
544 | if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS) { |
545 | if (env.verbosity != VERBOSE_NONE) |
546 | printf("alignment prevents execution\n" ); |
547 | return false; |
548 | } |
549 | |
550 | return true; |
551 | } |
552 | |
/* Run one test spec in one mode (priv or unpriv) as a test_progs subtest:
 * re-open the object, autoload only the needed programs/maps, load, check
 * the verifier log, and optionally execute the program.
 *
 * this function is forced noinline and has short generic name to look better
 * in test_progs output (in case of a failure)
 */
static noinline
void run_subtest(struct test_loader *tester,
		 struct bpf_object_open_opts *open_opts,
		 const void *obj_bytes,
		 size_t obj_byte_cnt,
		 struct test_spec *specs,
		 struct test_spec *spec,
		 bool unpriv)
{
	struct test_subspec *subspec = unpriv ? &spec->unpriv : &spec->priv;
	struct bpf_program *tprog = NULL, *tprog_iter;
	struct test_spec *spec_iter;
	struct cap_state caps = {};
	struct bpf_object *tobj;
	struct bpf_map *map;
	int retval, err, i;
	bool should_load;

	if (!test__start_subtest(subspec->name))
		return;

	if (unpriv) {
		/* skip (not fail) when the host can't run unpriv programs */
		if (!can_execute_unpriv(tester, spec)) {
			test__skip();
			test__end_subtest();
			return;
		}
		if (drop_capabilities(&caps)) {
			test__end_subtest();
			return;
		}
	}

	/* Implicitly reset to NULL if next test case doesn't specify */
	open_opts->btf_custom_path = spec->btf_custom_path;

	tobj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, open_opts);
	if (!ASSERT_OK_PTR(tobj, "obj_open_mem")) /* shouldn't happen */
		goto subtest_cleanup;

	/* Autoload only the program under test plus any auxiliary programs
	 * enabled for this mode; specs[] parallels the program order.
	 */
	i = 0;
	bpf_object__for_each_program(tprog_iter, tobj) {
		spec_iter = &specs[i++];
		should_load = false;

		if (spec_iter->valid) {
			if (strcmp(bpf_program__name(tprog_iter), spec->prog_name) == 0) {
				tprog = tprog_iter;
				should_load = true;
			}

			if (spec_iter->auxiliary &&
			    spec_iter->mode_mask & (unpriv ? UNPRIV : PRIV))
				should_load = true;
		}

		bpf_program__set_autoload(tprog_iter, should_load);
	}

	prepare_case(tester, spec, tobj, tprog);

	/* By default bpf_object__load() automatically creates all
	 * maps declared in the skeleton. Some map types are only
	 * allowed in priv mode. Disable autoload for such maps in
	 * unpriv mode.
	 */
	bpf_object__for_each_map(map, tobj)
		bpf_map__set_autocreate(map, !unpriv || is_unpriv_capable_map(map));

	err = bpf_object__load(tobj);
	if (subspec->expect_failure) {
		if (!ASSERT_ERR(err, "unexpected_load_success")) {
			emit_verifier_log(tester->log_buf, false /*force*/);
			goto tobj_cleanup;
		}
	} else {
		if (!ASSERT_OK(err, "unexpected_load_failure")) {
			emit_verifier_log(tester->log_buf, true /*force*/);
			goto tobj_cleanup;
		}
	}

	emit_verifier_log(tester->log_buf, false /*force*/);
	validate_case(tester, subspec, tobj, tprog, err);

	if (should_do_test_run(spec, subspec)) {
		/* For some reason test_verifier executes programs
		 * with all capabilities restored. Do the same here.
		 */
		if (restore_capabilities(&caps))
			goto tobj_cleanup;

		if (tester->pre_execution_cb) {
			err = tester->pre_execution_cb(tobj);
			if (err) {
				PRINT_FAIL("pre_execution_cb failed: %d\n", err);
				goto tobj_cleanup;
			}
		}

		/* SYSCALL programs run with emptied test-run options */
		do_prog_test_run(bpf_program__fd(tprog), &retval,
				 bpf_program__type(tprog) == BPF_PROG_TYPE_SYSCALL ? true : false);
		/* an expected retval of POINTER_VALUE acts as a wildcard */
		if (retval != subspec->retval && subspec->retval != POINTER_VALUE) {
			PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval);
			goto tobj_cleanup;
		}
	}

tobj_cleanup:
	bpf_object__close(tobj);
subtest_cleanup:
	test__end_subtest();
	restore_capabilities(&caps);
}
670 | |
671 | static void process_subtest(struct test_loader *tester, |
672 | const char *skel_name, |
673 | skel_elf_bytes_fn elf_bytes_factory) |
674 | { |
675 | LIBBPF_OPTS(bpf_object_open_opts, open_opts, .object_name = skel_name); |
676 | struct test_spec *specs = NULL; |
677 | struct bpf_object *obj = NULL; |
678 | struct bpf_program *prog; |
679 | const void *obj_bytes; |
680 | int err, i, nr_progs; |
681 | size_t obj_byte_cnt; |
682 | |
683 | if (tester_init(tester) < 0) |
684 | return; /* failed to initialize tester */ |
685 | |
686 | obj_bytes = elf_bytes_factory(&obj_byte_cnt); |
687 | obj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, &open_opts); |
688 | if (!ASSERT_OK_PTR(obj, "obj_open_mem" )) |
689 | return; |
690 | |
691 | nr_progs = 0; |
692 | bpf_object__for_each_program(prog, obj) |
693 | ++nr_progs; |
694 | |
695 | specs = calloc(nr_progs, sizeof(struct test_spec)); |
696 | if (!ASSERT_OK_PTR(specs, "specs_alloc" )) |
697 | return; |
698 | |
699 | i = 0; |
700 | bpf_object__for_each_program(prog, obj) { |
701 | /* ignore tests for which we can't derive test specification */ |
702 | err = parse_test_spec(tester, obj, prog, spec: &specs[i++]); |
703 | if (err) |
704 | PRINT_FAIL("Can't parse test spec for program '%s'\n" , |
705 | bpf_program__name(prog)); |
706 | } |
707 | |
708 | i = 0; |
709 | bpf_object__for_each_program(prog, obj) { |
710 | struct test_spec *spec = &specs[i++]; |
711 | |
712 | if (!spec->valid || spec->auxiliary) |
713 | continue; |
714 | |
715 | if (spec->mode_mask & PRIV) |
716 | run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt, |
717 | specs, spec, false); |
718 | if (spec->mode_mask & UNPRIV) |
719 | run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt, |
720 | specs, spec, true); |
721 | |
722 | } |
723 | |
724 | for (i = 0; i < nr_progs; ++i) |
725 | free_test_spec(spec: &specs[i]); |
726 | free(specs); |
727 | bpf_object__close(obj); |
728 | } |
729 | |
/* Public entry point used by skeleton-based test wrappers: run every test
 * spec found in @skel_name's programs as individual test_progs subtests.
 */
void test_loader__run_subtests(struct test_loader *tester,
			       const char *skel_name,
			       skel_elf_bytes_fn elf_bytes_factory)
{
	/* see comment in run_subtest() for why we do this function nesting */
	process_subtest(tester, skel_name, elf_bytes_factory);
}
737 | |