// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook */

#include <uapi/linux/btf.h>
#include <uapi/linux/bpf.h>
#include <uapi/linux/bpf_perf_event.h>
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/sort.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf.h>
#include <linux/bpf_lsm.h>
#include <linux/skmsg.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/overflow.h>

#include <net/netfilter/nf_bpf_link.h>

#include <net/sock.h>
#include <net/xdp.h>
#include "../tools/lib/bpf/relo_core.h"

/* BTF (BPF Type Format) is the metadata format which describes
 * the data types of BPF programs/maps. Hence, it basically focuses
 * on the C programming language, which modern BPF is primarily
 * using.
 *
 * ELF Section:
 * ~~~~~~~~~~~
 * The BTF data is stored under the ".BTF" ELF section
 *
 * struct btf_type:
 * ~~~~~~~~~~~~~~~
 * Each 'struct btf_type' object describes a C data type.
 * Depending on the type it is describing, a 'struct btf_type'
 * object may be followed by more data. E.g., to describe an
 * array, 'struct btf_type' is followed by 'struct btf_array'.
 *
 * 'struct btf_type' and any extra data following it are
 * 4-byte aligned.
 *
 * Type section:
 * ~~~~~~~~~~~~~
 * The BTF type section contains a list of 'struct btf_type' objects.
 * Each one describes a C type. Recall from the above section
 * that a 'struct btf_type' object could be immediately followed by extra
 * data in order to describe some particular C types.
 *
 * type_id:
 * ~~~~~~~
 * Each btf_type object is identified by a type_id. The type_id
 * is implied by the location of the btf_type object in the
 * BTF type section. The first one has type_id 1. The second
 * one has type_id 2, etc. Hence, an earlier btf_type has
 * a smaller type_id.
 *
 * A btf_type object may refer to another btf_type object by using
 * type_id (i.e. the "type" in the "struct btf_type").
 *
 * NOTE that we cannot assume any reference-order.
 * A btf_type object can refer to an earlier btf_type object
 * but it can also refer to a later btf_type object.
 *
 * For example, to describe "const void *", a btf_type
 * object describing "const" may refer to another btf_type
 * object describing "void *". This type-reference is done
 * by specifying type_id:
 *
 * [1] CONST (anon) type_id=2
 * [2] PTR (anon) type_id=0
 *
 * The above is the btf_verifier debug log:
 *   - Each line starting with "[?]" is a btf_type object
 *   - [?] is the type_id of the btf_type object.
 *   - CONST/PTR is the BTF_KIND_XXX
 *   - "(anon)" is the name of the type. It just
 *     happens that CONST and PTR have no name.
 *   - type_id=XXX is the 'u32 type' in btf_type
 *
 * NOTE: "void" has type_id 0
 *
 * String section:
 * ~~~~~~~~~~~~~~
 * The BTF string section contains the names used by the type section.
 * Each string is referred to by an "offset" from the beginning of the
 * string section.
 *
 * Each string is '\0' terminated.
 *
 * The first character in the string section must be '\0'
 * which is used to mean 'anonymous'. Some btf_type may not
 * have a name.
 */
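
/* For illustration only (a sketch, not data used anywhere below): the
 * "const void *" example above could be encoded with the uapi
 * 'struct btf_type' layout, where the kind lives in bits 24-28 of
 * 'info', roughly as:
 *
 *	const struct btf_type const_void_ptr[2] = {
 *		{ .name_off = 0, .info = BTF_KIND_CONST << 24, .type = 2 },
 *		{ .name_off = 0, .info = BTF_KIND_PTR << 24,   .type = 0 },
 *	};
 *
 * [1] CONST refers to type_id 2 (the PTR), and [2] PTR refers to
 * type_id 0 (void).
 */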

/* BTF verification:
 *
 * To verify BTF data, two passes are needed.
 *
 * Pass #1
 * ~~~~~~~
 * The first pass is to collect all btf_type objects to
 * an array: "btf->types".
 *
 * Depending on the C type that a btf_type is describing,
 * a btf_type may be followed by extra data. We don't know
 * how many btf_types there are, and more importantly we don't
 * know where each btf_type is located in the type section.
 *
 * Without knowing the location of each type_id, most verifications
 * cannot be done. e.g. an earlier btf_type may refer to a later
 * btf_type (recall the "const void *" above), so we cannot
 * check this type-reference in the first pass.
 *
 * The first pass still does some verifications (e.g.
 * checking that the name is a valid offset into the string section).
 *
 * Pass #2
 * ~~~~~~~
 * The main focus is to resolve a btf_type that is referring
 * to another type.
 *
 * We have to ensure the referring type:
 * 1) does exist in the BTF (i.e. in btf->types[])
 * 2) does not cause a loop:
 *	struct A {
 *		struct B b;
 *	};
 *
 *	struct B {
 *		struct A a;
 *	};
 *
 * btf_type_needs_resolve() decides if a btf_type needs
 * to be resolved.
 *
 * The needs_resolve type implements the "resolve()" ops which
 * essentially does a DFS and detects backedges.
 *
 * During resolve (or DFS), different C types have different
 * "RESOLVED" conditions.
 *
 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
 * members because a member is always referring to another
 * type. A struct's member can be treated as "RESOLVED" if
 * it is referring to a BTF_KIND_PTR. Otherwise, the
 * following valid C struct would be rejected:
 *
 *	struct A {
 *		int m;
 *		struct A *a;
 *	};
 *
 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
 * detect a pointer loop, e.g.:
 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
 *                        ^                                          |
 *                        +------------------------------------------+
 *
 */
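
/* For example, the struct A <-> struct B loop above would show up in
 * the debug log along the lines of (illustrative type_ids only):
 *
 *	[1] STRUCT A ... member type_id=2
 *	[2] STRUCT B ... member type_id=1
 *
 * A DFS started at [1] visits [2] and then reaches [1] again while [1]
 * is still on the resolve stack (VISITED but not yet RESOLVED), i.e. a
 * backedge, so the BTF is rejected.
 */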

#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
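
/* E.g. BITS_ROUNDUP_BYTES(12) == (12 >> 3) + !!(12 & 7) == 1 + 1 == 2:
 * a 12-bit bitfield needs two bytes of storage.
 */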

#define BTF_INFO_MASK 0x9f00ffff
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)

/* 16MB for 64k structs (each with 16 members) and
 * a few MB of space for the string section.
 * The hard limit is S32_MAX.
 */
#define BTF_MAX_SIZE (16 * 1024 * 1024)

#define for_each_member_from(i, from, struct_type, member)		\
	for (i = from, member = btf_type_member(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

#define for_each_vsi_from(i, from, struct_type, member)				\
	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);					\
	     i++, member++)
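
/* Usage sketch (hypothetical, mirroring the resolve code later in this
 * file, where 'v' would be a struct resolve_vertex): resume walking a
 * struct's members from a saved index:
 *
 *	const struct btf_member *member;
 *	u16 i;
 *
 *	for_each_member_from(i, v->next_member, struct_type, member) {
 *		// inspect member->type, member->offset, ...
 *	}
 */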

DEFINE_IDR(btf_idr);
DEFINE_SPINLOCK(btf_idr_lock);

enum btf_kfunc_hook {
	BTF_KFUNC_HOOK_COMMON,
	BTF_KFUNC_HOOK_XDP,
	BTF_KFUNC_HOOK_TC,
	BTF_KFUNC_HOOK_STRUCT_OPS,
	BTF_KFUNC_HOOK_TRACING,
	BTF_KFUNC_HOOK_SYSCALL,
	BTF_KFUNC_HOOK_FMODRET,
	BTF_KFUNC_HOOK_CGROUP,
	BTF_KFUNC_HOOK_SCHED_ACT,
	BTF_KFUNC_HOOK_SK_SKB,
	BTF_KFUNC_HOOK_SOCKET_FILTER,
	BTF_KFUNC_HOOK_LWT,
	BTF_KFUNC_HOOK_NETFILTER,
	BTF_KFUNC_HOOK_KPROBE,
	BTF_KFUNC_HOOK_MAX,
};

enum {
	BTF_KFUNC_SET_MAX_CNT = 256,
	BTF_DTOR_KFUNC_MAX_CNT = 256,
	BTF_KFUNC_FILTER_MAX_CNT = 16,
};

struct btf_kfunc_hook_filter {
	btf_kfunc_filter_t filters[BTF_KFUNC_FILTER_MAX_CNT];
	u32 nr_filters;
};

struct btf_kfunc_set_tab {
	struct btf_id_set8 *sets[BTF_KFUNC_HOOK_MAX];
	struct btf_kfunc_hook_filter hook_filters[BTF_KFUNC_HOOK_MAX];
};

struct btf_id_dtor_kfunc_tab {
	u32 cnt;
	struct btf_id_dtor_kfunc dtors[];
};

struct btf_struct_ops_tab {
	u32 cnt;
	u32 capacity;
	struct bpf_struct_ops_desc ops[];
};

struct btf {
	void *data;
	struct btf_type **types;
	u32 *resolved_ids;
	u32 *resolved_sizes;
	const char *strings;
	void *nohdr_data;
	struct btf_header hdr;
	u32 nr_types; /* includes VOID for base BTF */
	u32 types_size;
	u32 data_size;
	refcount_t refcnt;
	u32 id;
	struct rcu_head rcu;
	struct btf_kfunc_set_tab *kfunc_set_tab;
	struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab;
	struct btf_struct_metas *struct_meta_tab;
	struct btf_struct_ops_tab *struct_ops_tab;

	/* split BTF support */
	struct btf *base_btf;
	u32 start_id; /* first type ID in this BTF (0 for base BTF) */
	u32 start_str_off; /* first string offset (0 for base BTF) */
	char name[MODULE_NAME_LEN];
	bool kernel_btf;
	__u32 *base_id_map; /* map from distilled base BTF -> vmlinux BTF ids */
};

enum verifier_phase {
	CHECK_META,
	CHECK_TYPE,
};

struct resolve_vertex {
	const struct btf_type *t;
	u32 type_id;
	u16 next_member;
};

enum visit_state {
	NOT_VISITED,
	VISITED,
	RESOLVED,
};

enum resolve_mode {
	RESOLVE_TBD,	/* To Be Determined */
	RESOLVE_PTR,	/* Resolving for Pointer */
	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
					 * or array
					 */
};

#define MAX_RESOLVE_DEPTH 32

struct btf_sec_info {
	u32 off;
	u32 len;
};

struct btf_verifier_env {
	struct btf *btf;
	u8 *visit_states;
	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
	struct bpf_verifier_log log;
	u32 log_type_id;
	u32 top_stack;
	enum verifier_phase phase;
	enum resolve_mode resolve_mode;
};

static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_UNKN]		= "UNKNOWN",
	[BTF_KIND_INT]		= "INT",
	[BTF_KIND_PTR]		= "PTR",
	[BTF_KIND_ARRAY]	= "ARRAY",
	[BTF_KIND_STRUCT]	= "STRUCT",
	[BTF_KIND_UNION]	= "UNION",
	[BTF_KIND_ENUM]		= "ENUM",
	[BTF_KIND_FWD]		= "FWD",
	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
	[BTF_KIND_VOLATILE]	= "VOLATILE",
	[BTF_KIND_CONST]	= "CONST",
	[BTF_KIND_RESTRICT]	= "RESTRICT",
	[BTF_KIND_FUNC]		= "FUNC",
	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
	[BTF_KIND_VAR]		= "VAR",
	[BTF_KIND_DATASEC]	= "DATASEC",
	[BTF_KIND_FLOAT]	= "FLOAT",
	[BTF_KIND_DECL_TAG]	= "DECL_TAG",
	[BTF_KIND_TYPE_TAG]	= "TYPE_TAG",
	[BTF_KIND_ENUM64]	= "ENUM64",
};

const char *btf_type_str(const struct btf_type *t)
{
	return btf_kind_str[BTF_INFO_KIND(t->info)];
}

/* Chunk size we use in safe copy of data to be shown. */
#define BTF_SHOW_OBJ_SAFE_SIZE 32

/*
 * This is the maximum size of a base type value (equivalent to a
 * 128-bit int); if we are at the end of our safe buffer and have
 * less than 16 bytes of space we can't be assured of being able
 * to copy the next type safely, so in such cases we will initiate
 * a new copy.
 */
#define BTF_SHOW_OBJ_BASE_TYPE_SIZE 16

/* Type name size */
#define BTF_SHOW_NAME_SIZE 80

/*
 * The suffix of a type that indicates it cannot alias another type when
 * comparing BTF IDs for kfunc invocations.
 */
#define NOCAST_ALIAS_SUFFIX "___init"

/*
 * Common data to all BTF show operations. Private show functions can add
 * their own data to a structure containing a struct btf_show and consult it
 * in the show callback. See btf_type_show() below.
 *
 * One challenge with showing nested data is we want to skip 0-valued
 * data, but in order to figure out whether a nested object is all zeros
 * we need to walk through it. As a result, we need to make two passes
 * when handling structs, unions and arrays; the first pass simply looks
 * for nonzero data, while the second actually does the display. The first
 * pass is signalled by show->state.depth_check being set, and if we
 * encounter a non-zero value we set show->state.depth_to_show to
 * the depth at which we encountered it. When we have completed the
 * first pass, we will know if anything needs to be displayed if
 * depth_to_show > depth. See btf_[struct,array]_show() for the
 * implementation of this.
 *
 * Another problem is we want to ensure the data for display is safe to
 * access. To support this, the anonymous "struct {} obj" tracks the data
 * object and our safe copy of it. We copy portions of the data needed
 * to the object "copy" buffer, but because its size is limited to
 * BTF_SHOW_OBJ_SAFE_SIZE bytes, multiple copies may be required as we
 * traverse larger objects for display.
 *
 * The various data type show functions all start with a call to
 * btf_show_start_type() which returns a pointer to the safe copy
 * of the data needed (or if BTF_SHOW_UNSAFE is specified, to the
 * raw data itself). btf_show_obj_safe() is responsible for
 * using copy_from_kernel_nofault() to update the safe data if necessary
 * as we traverse the object's data. skbuff-like semantics are
 * used:
 *
 * - obj.head points to the start of the toplevel object for display
 * - obj.size is the size of the toplevel object
 * - obj.data points to the current point in the original data at
 *   which our safe data starts. obj.data will advance as we copy
 *   portions of the data.
 *
 * In most cases a single copy will suffice, but larger data structures
 * such as "struct task_struct" will require many copies. The logic in
 * btf_show_obj_safe() determines when a new
 * copy_from_kernel_nofault() is needed.
 */
struct btf_show {
	u64 flags;
	void *target;	/* target of show operation (seq file, buffer) */
	__printf(2, 0) void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
	const struct btf *btf;
	/* below are used during iteration */
	struct {
		u8 depth;
		u8 depth_to_show;
		u8 depth_check;
		u8 array_member:1,
		   array_terminated:1;
		u16 array_encoding;
		u32 type_id;
		int status;	/* non-zero for error */
		const struct btf_type *type;
		const struct btf_member *member;
		char name[BTF_SHOW_NAME_SIZE];	/* space for member name/type */
	} state;
	struct {
		u32 size;
		void *head;
		void *data;
		u8 safe[BTF_SHOW_OBJ_SAFE_SIZE];
	} obj;
};

struct btf_kind_operations {
	s32 (*check_meta)(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left);
	int (*resolve)(struct btf_verifier_env *env,
		       const struct resolve_vertex *v);
	int (*check_member)(struct btf_verifier_env *env,
			    const struct btf_type *struct_type,
			    const struct btf_member *member,
			    const struct btf_type *member_type);
	int (*check_kflag_member)(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type);
	void (*log_details)(struct btf_verifier_env *env,
			    const struct btf_type *t);
	void (*show)(const struct btf *btf, const struct btf_type *t,
		     u32 type_id, void *data, u8 bits_offsets,
		     struct btf_show *show);
};

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
static struct btf_type btf_void;

static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id);

static int btf_func_check(struct btf_verifier_env *env,
			  const struct btf_type *t);

static bool btf_type_is_modifier(const struct btf_type *t)
{
	/* Some of them are not strictly C modifiers,
	 * but they are grouped into the same bucket
	 * as far as BTF is concerned:
	 * a type (t) that refers to another
	 * type through t->type AND its size cannot
	 * be determined without following the t->type.
	 *
	 * ptr does not fall into this bucket
	 * because its size is always sizeof(void *).
	 */
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		return true;
	}

	return false;
}

bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void;
}

static bool btf_type_is_datasec(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
}

static bool btf_type_is_decl_tag(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
}

static bool btf_type_nosize(const struct btf_type *t)
{
	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
	       btf_type_is_func(t) || btf_type_is_func_proto(t) ||
	       btf_type_is_decl_tag(t);
}

static bool btf_type_nosize_or_null(const struct btf_type *t)
{
	return !t || btf_type_nosize(t);
}

static bool btf_type_is_decl_tag_target(const struct btf_type *t)
{
	return btf_type_is_func(t) || btf_type_is_struct(t) ||
	       btf_type_is_var(t) || btf_type_is_typedef(t);
}

bool btf_is_vmlinux(const struct btf *btf)
{
	return btf->kernel_btf && !btf->base_btf;
}

u32 btf_nr_types(const struct btf *btf)
{
	u32 total = 0;

	while (btf) {
		total += btf->nr_types;
		btf = btf->base_btf;
	}

	return total;
}
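
/* E.g. for a module's split BTF, this walks base_btf back to the
 * vmlinux BTF and sums both counts, so the returned total covers the
 * full type_id space (including the implicit VOID type_id 0 of the
 * base BTF).
 */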

s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
{
	const struct btf_type *t;
	const char *tname;
	u32 i, total;

	total = btf_nr_types(btf);
	for (i = 1; i < total; i++) {
		t = btf_type_by_id(btf, i);
		if (BTF_INFO_KIND(t->info) != kind)
			continue;

		tname = btf_name_by_offset(btf, t->name_off);
		if (!strcmp(tname, name))
			return i;
	}

	return -ENOENT;
}
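
/* Usage sketch (hypothetical; 'vmlinux_btf' is assumed to be a valid
 * struct btf *): look up the BTF ID of 'struct task_struct':
 *
 *	s32 id = btf_find_by_name_kind(vmlinux_btf, "task_struct",
 *				       BTF_KIND_STRUCT);
 *	if (id < 0)
 *		return id;	// -ENOENT if no such type
 */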

s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
{
	struct btf *btf;
	s32 ret;
	int id;

	btf = bpf_get_btf_vmlinux();
	if (IS_ERR(btf))
		return PTR_ERR(btf);
	if (!btf)
		return -EINVAL;

	ret = btf_find_by_name_kind(btf, name, kind);
	/* ret is never zero, since btf_find_by_name_kind returns
	 * a positive btf_id or a negative error.
	 */
	if (ret > 0) {
		btf_get(btf);
		*btf_p = btf;
		return ret;
	}

	/* If name is not found in vmlinux's BTF then search in modules' BTFs */
	spin_lock_bh(&btf_idr_lock);
	idr_for_each_entry(&btf_idr, btf, id) {
		if (!btf_is_module(btf))
			continue;
		/* linear search could be slow, hence unlock/lock
		 * the IDR to avoid holding it for too long
		 */
		btf_get(btf);
		spin_unlock_bh(&btf_idr_lock);
		ret = btf_find_by_name_kind(btf, name, kind);
		if (ret > 0) {
			*btf_p = btf;
			return ret;
		}
		btf_put(btf);
		spin_lock_bh(&btf_idr_lock);
	}
	spin_unlock_bh(&btf_idr_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(bpf_find_btf_id);

const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
					       u32 id, u32 *res_id)
{
	const struct btf_type *t = btf_type_by_id(btf, id);

	while (btf_type_is_modifier(t)) {
		id = t->type;
		t = btf_type_by_id(btf, t->type);
	}

	if (res_id)
		*res_id = id;

	return t;
}
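
/* E.g. given BTF for 'const volatile int', starting from the CONST
 * type_id this walks CONST -> VOLATILE -> INT, returns the INT type
 * and (if res_id is non-NULL) stores the INT's type_id in *res_id.
 */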

const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
					    u32 id, u32 *res_id)
{
	const struct btf_type *t;

	t = btf_type_skip_modifiers(btf, id, NULL);
	if (!btf_type_is_ptr(t))
		return NULL;

	return btf_type_skip_modifiers(btf, t->type, res_id);
}

const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
						 u32 id, u32 *res_id)
{
	const struct btf_type *ptype;

	ptype = btf_type_resolve_ptr(btf, id, res_id);
	if (ptype && btf_type_is_func_proto(ptype))
		return ptype;

	return NULL;
}

/* Types that act only as a source, not as a sink or intermediate
 * type when resolving.
 */
static bool btf_type_is_resolve_source_only(const struct btf_type *t)
{
	return btf_type_is_var(t) ||
	       btf_type_is_decl_tag(t) ||
	       btf_type_is_datasec(t);
}

/* What types need to be resolved?
 *
 * btf_type_is_modifier() is an obvious one.
 *
 * btf_type_is_struct() because its member refers to
 * another type (through member->type).
 *
 * btf_type_is_var() because the variable refers to
 * another type. btf_type_is_datasec() holds multiple
 * btf_type_is_var() types that need resolving.
 *
 * btf_type_is_array() because its element (array->type)
 * refers to another type. An array can be thought of as a
 * special case of a struct where the same member type is
 * repeated array->nelems times.
 */
static bool btf_type_needs_resolve(const struct btf_type *t)
{
	return btf_type_is_modifier(t) ||
	       btf_type_is_ptr(t) ||
	       btf_type_is_struct(t) ||
	       btf_type_is_array(t) ||
	       btf_type_is_var(t) ||
	       btf_type_is_func(t) ||
	       btf_type_is_decl_tag(t) ||
	       btf_type_is_datasec(t);
}

/* t->size can be used */
static bool btf_type_has_size(const struct btf_type *t)
{
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_INT:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_ENUM:
	case BTF_KIND_DATASEC:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM64:
		return true;
	}

	return false;
}

static const char *btf_int_encoding_str(u8 encoding)
{
	if (encoding == 0)
		return "(none)";
	else if (encoding == BTF_INT_SIGNED)
		return "SIGNED";
	else if (encoding == BTF_INT_CHAR)
		return "CHAR";
	else if (encoding == BTF_INT_BOOL)
		return "BOOL";
	else
		return "UNKN";
}

static u32 btf_type_int(const struct btf_type *t)
{
	return *(u32 *)(t + 1);
}

static const struct btf_array *btf_type_array(const struct btf_type *t)
{
	return (const struct btf_array *)(t + 1);
}

static const struct btf_enum *btf_type_enum(const struct btf_type *t)
{
	return (const struct btf_enum *)(t + 1);
}

static const struct btf_var *btf_type_var(const struct btf_type *t)
{
	return (const struct btf_var *)(t + 1);
}

static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
{
	return (const struct btf_decl_tag *)(t + 1);
}

static const struct btf_enum64 *btf_type_enum64(const struct btf_type *t)
{
	return (const struct btf_enum64 *)(t + 1);
}

static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
{
	return kind_ops[BTF_INFO_KIND(t->info)];
}

static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{
	if (!BTF_STR_OFFSET_VALID(offset))
		return false;

	while (offset < btf->start_str_off)
		btf = btf->base_btf;

	offset -= btf->start_str_off;
	return offset < btf->hdr.str_len;
}

static bool __btf_name_char_ok(char c, bool first)
{
	if ((first ? !isalpha(c) :
		     !isalnum(c)) &&
	    c != '_' &&
	    c != '.')
		return false;
	return true;
}

const char *btf_str_by_offset(const struct btf *btf, u32 offset)
{
	while (offset < btf->start_str_off)
		btf = btf->base_btf;

	offset -= btf->start_str_off;
	if (offset < btf->hdr.str_len)
		return &btf->strings[offset];

	return NULL;
}
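
/* E.g. with split BTF, a module's own strings start at start_str_off;
 * an offset below that is resolved by walking base_btf down to the
 * vmlinux string section, so a single global offset space covers both.
 */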

static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
{
	/* offset must be valid */
	const char *src = btf_str_by_offset(btf, offset);
	const char *src_limit;

	if (!__btf_name_char_ok(*src, true))
		return false;

	/* set a limit on identifier length */
	src_limit = src + KSYM_NAME_LEN;
	src++;
	while (*src && src < src_limit) {
		if (!__btf_name_char_ok(*src, false))
			return false;
		src++;
	}

	return !*src;
}

/* Allow any printable character in DATASEC names */
static bool btf_name_valid_section(const struct btf *btf, u32 offset)
{
	/* offset must be valid */
	const char *src = btf_str_by_offset(btf, offset);
	const char *src_limit;

	if (!*src)
		return false;

	/* set a limit on identifier length */
	src_limit = src + KSYM_NAME_LEN;
	while (*src && src < src_limit) {
		if (!isprint(*src))
			return false;
		src++;
	}

	return !*src;
}

static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
{
	const char *name;

	if (!offset)
		return "(anon)";

	name = btf_str_by_offset(btf, offset);
	return name ?: "(invalid-name-offset)";
}

const char *btf_name_by_offset(const struct btf *btf, u32 offset)
{
	return btf_str_by_offset(btf, offset);
}

const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
{
	while (type_id < btf->start_id)
		btf = btf->base_btf;

	type_id -= btf->start_id;
	if (type_id >= btf->nr_types)
		return NULL;
	return btf->types[type_id];
}
EXPORT_SYMBOL_GPL(btf_type_by_id);

/*
 * A regular int is not a bitfield and it must be either
 * u8/u16/u32/u64 or __int128.
 */
static bool btf_type_int_is_regular(const struct btf_type *t)
{
	u8 nr_bits, nr_bytes;
	u32 int_data;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
	if (BITS_PER_BYTE_MASKED(nr_bits) ||
	    BTF_INT_OFFSET(int_data) ||
	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
	     nr_bytes != (2 * sizeof(u64)))) {
		return false;
	}

	return true;
}
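
/* E.g. an INT with nr_bits == 32 and offset == 0 is regular (a u32),
 * while one with nr_bits == 12 fails the BITS_PER_BYTE_MASKED() check
 * (12 & 7 != 0) and so is treated as a bitfield, not a regular int.
 */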

/*
 * Check that the given struct member is a regular int with the
 * expected offset and size.
 */
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
			   const struct btf_member *m,
			   u32 expected_offset, u32 expected_size)
{
	const struct btf_type *t;
	u32 id, int_data;
	u8 nr_bits;

	id = m->type;
	t = btf_type_id_size(btf, &id, NULL);
	if (!t || !btf_type_is_int(t))
		return false;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	if (btf_type_kflag(s)) {
		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);

		/* if kflag is set, the int should be a regular int and
		 * the bit offset should be at a byte boundary.
		 */
		return !bitfield_size &&
		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
	}

	if (BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(m->offset) ||
	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
	    BITS_PER_BYTE_MASKED(nr_bits) ||
	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
		return false;

	return true;
}

/* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
						       u32 id)
{
	const struct btf_type *t = btf_type_by_id(btf, id);

	while (btf_type_is_modifier(t) &&
	       BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
		t = btf_type_by_id(btf, t->type);
	}

	return t;
}

#define BTF_SHOW_MAX_ITER 10

#define BTF_KIND_BIT(kind) (1ULL << kind)

/*
 * Populate show->state.name with type name information.
 * Format of type name is
 *
 * [.member_name = ] (type_name)
 */
static const char *btf_show_name(struct btf_show *show)
{
	/* BTF_SHOW_MAX_ITER array suffixes "[]" */
	const char *array_suffixes = "[][][][][][][][][][]";
	const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
	/* BTF_SHOW_MAX_ITER pointer suffixes "*" */
	const char *ptr_suffixes = "**********";
	const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
	const char *name = NULL, *prefix = "", *parens = "";
	const struct btf_member *m = show->state.member;
	const struct btf_type *t;
	const struct btf_array *array;
	u32 id = show->state.type_id;
	const char *member = NULL;
	bool show_member = false;
	u64 kinds = 0;
	int i;

	show->state.name[0] = '\0';

	/*
	 * Don't show type name if we're showing an array member;
	 * in that case we show the array type so we don't need to repeat
	 * ourselves for each member.
	 */
	if (show->state.array_member)
		return "";

	/* Retrieve member name, if any. */
	if (m) {
		member = btf_name_by_offset(show->btf, m->name_off);
		show_member = strlen(member) > 0;
		id = m->type;
	}

	/*
	 * Start with type_id, as we have resolved the struct btf_type *
	 * via btf_modifier_show() past the parent typedef to the child
	 * struct, int etc. it is defined as. In such cases, the type_id
	 * still represents the starting type while the struct btf_type *
	 * in our show->state points at the resolved type of the typedef.
	 */
	t = btf_type_by_id(show->btf, id);
	if (!t)
		return "";

	/*
	 * The goal here is to build up the right number of pointer and
	 * array suffixes while ensuring the type name for a typedef
	 * is represented. Along the way we accumulate a list of
	 * BTF kinds we have encountered, since these will inform later
	 * display; for example, pointer types will not require an
	 * opening "{" for struct, we will just display the pointer value.
	 *
	 * We also want to accumulate the right number of pointer or array
	 * indices in the format string while iterating until we get to
	 * the typedef/pointee/array member target type.
	 *
	 * We start by pointing at the end of pointer and array suffix
	 * strings; as we accumulate pointers and arrays we move the pointer
	 * or array string backwards so it will show the expected number of
	 * '*' or '[]' for the type. Up to BTF_SHOW_MAX_ITER levels of
	 * nesting of pointers and/or arrays and typedefs are supported as
	 * a precaution.
	 *
	 * We also want to get the typedef name while proceeding to resolve
	 * the type it points to so that we can add parentheses if it is a
	 * "typedef struct" etc.
	 */
	for (i = 0; i < BTF_SHOW_MAX_ITER; i++) {

		switch (BTF_INFO_KIND(t->info)) {
		case BTF_KIND_TYPEDEF:
			if (!name)
				name = btf_name_by_offset(show->btf,
							  t->name_off);
			kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF);
			id = t->type;
			break;
		case BTF_KIND_ARRAY:
			kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY);
			parens = "[";
			if (!t)
				return "";
			array = btf_type_array(t);
			if (array_suffix > array_suffixes)
				array_suffix -= 2;
			id = array->type;
			break;
		case BTF_KIND_PTR:
			kinds |= BTF_KIND_BIT(BTF_KIND_PTR);
			if (ptr_suffix > ptr_suffixes)
				ptr_suffix -= 1;
			id = t->type;
			break;
		default:
			id = 0;
			break;
		}
		if (!id)
			break;
		t = btf_type_skip_qualifiers(show->btf, id);
	}
	/* We may not be able to represent this type; bail to be safe */
	if (i == BTF_SHOW_MAX_ITER)
		return "";

	if (!name)
		name = btf_name_by_offset(show->btf, t->name_off);

	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
			 "struct" : "union";
		/* if it's an array of struct/union, parens is already set */
		if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY))))
			parens = "{";
		break;
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		prefix = "enum";
		break;
	default:
		break;
	}

	/* pointer does not require parens */
	if (kinds & BTF_KIND_BIT(BTF_KIND_PTR))
		parens = "";
	/* typedef does not require struct/union/enum prefix */
	if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
		prefix = "";

	if (!name)
		name = "";

	/* Even if we don't want type name info, we want parentheses etc */
	if (show->flags & BTF_SHOW_NONAME)
		snprintf(show->state.name, sizeof(show->state.name), "%s",
			 parens);
	else
		snprintf(show->state.name, sizeof(show->state.name),
			 "%s%s%s(%s%s%s%s%s%s)%s",
			 /* first 3 strings comprise ".member = " */
			 show_member ? "." : "",
			 show_member ? member : "",
			 show_member ? " = " : "",
			 /* ...next is our prefix (struct, enum, etc) */
			 prefix,
			 strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
			 /* ...this is the type name itself */
			 name,
			 /* ...suffixed by the appropriate '*', '[]' suffixes */
			 strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
			 array_suffix, parens);

	return show->state.name;
}

static const char *__btf_show_indent(struct btf_show *show)
{
	const char *indents = "                                ";
	const char *indent = &indents[strlen(indents)];

	if ((indent - show->state.depth) >= indents)
		return indent - show->state.depth;
	return indents;
}

static const char *btf_show_indent(struct btf_show *show)
{
	return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
}

static const char *btf_show_newline(struct btf_show *show)
{
	return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
}

static const char *btf_show_delim(struct btf_show *show)
{
	if (show->state.depth == 0)
		return "";

	if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
	    BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
		return "|";

	return ",";
}

__printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
{
	va_list args;

	if (!show->state.depth_check) {
		va_start(args, fmt);
		show->showfn(show, fmt, args);
		va_end(args);
	}
}

/* Macros are used here as btf_show_type_value[s]() prepends and appends
 * format specifiers to the format specifier passed in; these do the work of
 * adding indentation, delimiters etc while the caller simply has to specify
 * the type value(s) in the format specifier + value(s).
 */
#define btf_show_type_value(show, fmt, value)				       \
	do {								       \
		if ((value) != (__typeof__(value))0 ||			       \
		    (show->flags & BTF_SHOW_ZERO) ||			       \
		    show->state.depth == 0) {				       \
			btf_show(show, "%s%s" fmt "%s%s",		       \
				 btf_show_indent(show),			       \
				 btf_show_name(show),			       \
				 value, btf_show_delim(show),		       \
				 btf_show_newline(show));		       \
			if (show->state.depth > show->state.depth_to_show)     \
				show->state.depth_to_show = show->state.depth; \
		}							       \
	} while (0)

#define btf_show_type_values(show, fmt, ...)				       \
	do {								       \
		btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show),       \
			 btf_show_name(show),				       \
			 __VA_ARGS__, btf_show_delim(show),		       \
			 btf_show_newline(show));			       \
		if (show->state.depth > show->state.depth_to_show)	       \
			show->state.depth_to_show = show->state.depth;	       \
	} while (0)
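
/* E.g. a kind-specific show callback might emit a plain int value as
 * (sketch only):
 *
 *	btf_show_type_value(show, "%d", *(int *)safe_data);
 *
 * and the macro adds the indent, member/type name, delimiter and
 * newline around it, skipping zero values unless BTF_SHOW_ZERO is set.
 */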

/* How much is left to copy to safe buffer after @data? */
static int btf_show_obj_size_left(struct btf_show *show, void *data)
{
	return show->obj.head + show->obj.size - data;
}

/* Is object pointed to by @data of @size already copied to our safe buffer? */
static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
{
	return data >= show->obj.data &&
	       (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
}

/*
 * If object pointed to by @data of @size falls within our safe buffer, return
 * the equivalent pointer to the same safe data. Assumes
 * copy_from_kernel_nofault() has already happened and our safe buffer is
 * populated.
 */
static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
{
	if (btf_show_obj_is_safe(show, data, size))
		return show->obj.safe + (data - show->obj.data);
	return NULL;
}

/*
 * Return a safe-to-access version of data pointed to by @data.
 * We do this by copying the relevant amount of information
 * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
 *
 * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
 * safe copy is needed.
 *
 * Otherwise we need to determine if we have the required amount
 * of data (determined by the @data pointer and the size of the
 * largest base type we can encounter, represented by
 * BTF_SHOW_OBJ_BASE_TYPE_SIZE). Having that much data ensures
 * that we will be able to print some of the current object,
 * and if more is needed a copy will be triggered.
 * Some objects such as structs will not fit into the buffer;
 * in such cases additional copies when we iterate over their
 * members may be needed.
 *
 * btf_show_obj_safe() is used to return a safe buffer for
 * btf_show_start_type(); this ensures that as we recurse into
 * nested types we always have safe data for the given type.
 * This approach is somewhat wasteful; it's possible for example
 * that when iterating over a large union we'll end up copying the
 * same data repeatedly, but the goal is safety not performance.
 * We use stack data as opposed to per-CPU buffers because the
 * iteration over a type can take some time, and preemption handling
 * would greatly complicate use of the safe buffer.
 */
static void *btf_show_obj_safe(struct btf_show *show,
			       const struct btf_type *t,
			       void *data)
{
	const struct btf_type *rt;
	int size_left, size;
	void *safe = NULL;

	if (show->flags & BTF_SHOW_UNSAFE)
		return data;

	rt = btf_resolve_size(show->btf, t, &size);
	if (IS_ERR(rt)) {
		show->state.status = PTR_ERR(rt);
		return NULL;
	}

	/*
	 * Is this toplevel object? If so, set total object size and
	 * initialize pointers. Otherwise check if we still fall within
	 * our safe object data.
	 */
	if (show->state.depth == 0) {
		show->obj.size = size;
		show->obj.head = data;
	} else {
		/*
		 * If the size of the current object is > our remaining
		 * safe buffer we _may_ need to do a new copy. However,
		 * consider the case of a nested struct; its size pushes
		 * us over the safe buffer limit, but showing any individual
		 * struct members does not. In such cases, we don't need
		 * to initiate a fresh copy yet; however we definitely need
		 * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
		 * in our buffer, regardless of the current object size.
		 * The logic here is that as we resolve types we will
		 * hit a base type at some point, and we need to be sure
		 * the next chunk of data is available to display
		 * that type info safely. We cannot rely on the size of
		 * the current object here because it may be much larger
		 * than our current buffer (e.g. task_struct is 8k).
		 * All we want to do here is ensure that we can print the
		 * next basic type, which we can if either
		 * - the current type size is within the safe buffer; or
		 * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
		 *   the safe buffer.
		 */
		safe = __btf_show_obj_safe(show, data,
					   min(size,
					       BTF_SHOW_OBJ_BASE_TYPE_SIZE));
	}

	/*
	 * We need a new copy to our safe object, either because we haven't
	 * yet copied and are initializing safe data, or because the data
	 * we want falls outside the boundaries of the safe object.
	 */
	if (!safe) {
		size_left = btf_show_obj_size_left(show, data);
		if (size_left > BTF_SHOW_OBJ_SAFE_SIZE)
			size_left = BTF_SHOW_OBJ_SAFE_SIZE;
		show->state.status = copy_from_kernel_nofault(show->obj.safe,
							      data, size_left);
		if (!show->state.status) {
			show->obj.data = data;
			safe = show->obj.safe;
		}
	}

	return safe;
}

/*
 * Set the type we are starting to show and return a safe data pointer
 * to be used for showing the associated data.
 */
static void *btf_show_start_type(struct btf_show *show,
				 const struct btf_type *t,
				 u32 type_id, void *data)
{
	show->state.type = t;
	show->state.type_id = type_id;
	show->state.name[0] = '\0';

	return btf_show_obj_safe(show, t, data);
}

static void btf_show_end_type(struct btf_show *show)
{
	show->state.type = NULL;
	show->state.type_id = 0;
	show->state.name[0] = '\0';
}

static void *btf_show_start_aggr_type(struct btf_show *show,
				      const struct btf_type *t,
				      u32 type_id, void *data)
{
	void *safe_data = btf_show_start_type(show, t, type_id, data);

	if (!safe_data)
		return safe_data;

	btf_show(show, "%s%s%s", btf_show_indent(show),
		 btf_show_name(show),
		 btf_show_newline(show));
	show->state.depth++;
	return safe_data;
}

static void btf_show_end_aggr_type(struct btf_show *show,
				   const char *suffix)
{
	show->state.depth--;
	btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
		 btf_show_delim(show), btf_show_newline(show));
	btf_show_end_type(show);
}

static void btf_show_start_member(struct btf_show *show,
				  const struct btf_member *m)
{
	show->state.member = m;
}

static void btf_show_start_array_member(struct btf_show *show)
{
	show->state.array_member = 1;
	btf_show_start_member(show, NULL);
}

static void btf_show_end_member(struct btf_show *show)
{
	show->state.member = NULL;
}

static void btf_show_end_array_member(struct btf_show *show)
{
	show->state.array_member = 0;
	btf_show_end_member(show);
}

static void *btf_show_start_array_type(struct btf_show *show,
				       const struct btf_type *t,
				       u32 type_id,
				       u16 array_encoding,
				       void *data)
{
	show->state.array_encoding = array_encoding;
	show->state.array_terminated = 0;
	return btf_show_start_aggr_type(show, t, type_id, data);
}

static void btf_show_end_array_type(struct btf_show *show)
{
	show->state.array_encoding = 0;
	show->state.array_terminated = 0;
	btf_show_end_aggr_type(show, "]");
}

static void *btf_show_start_struct_type(struct btf_show *show,
					const struct btf_type *t,
					u32 type_id,
					void *data)
{
	return btf_show_start_aggr_type(show, t, type_id, data);
}

static void btf_show_end_struct_type(struct btf_show *show)
{
	btf_show_end_aggr_type(show, "}");
}

__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
					      const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
					    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
						   const struct btf_type *t,
						   bool log_details,
						   const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL) {
		/* btf verifier prints all types it is processing via
		 * btf_verifier_log_type(..., fmt = NULL).
		 * Skip those prints for in-kernel BTF verification.
		 */
		if (!fmt)
			return;

		/* Skip logging when loading module BTF with mismatches permitted */
		if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
			return;
	}

	__btf_verifier_log(log, "[%u] %s %s%s",
			   env->log_type_id,
			   btf_type_str(t),
			   __btf_name_by_offset(btf, t->name_off),
			   log_details ? " " : "");

	if (log_details)
		btf_type_ops(t)->log_details(env, t);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

#define btf_verifier_log_type(env, t, ...) \
	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
#define btf_verifier_log_basic(env, t, ...) \
	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)

__printf(4, 5)
static void btf_verifier_log_member(struct btf_verifier_env *env,
				    const struct btf_type *struct_type,
				    const struct btf_member *member,
				    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL) {
		if (!fmt)
			return;

		/* Skip logging when loading module BTF with mismatches permitted */
		if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
			return;
	}

	/* The CHECK_META phase already did a btf dump.
	 *
	 * If a member is logged again, it must have hit an error in
	 * parsing this member. It is useful to print out which
	 * struct this member belongs to.
	 */
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, struct_type, NULL);

	if (btf_type_kflag(struct_type))
		__btf_verifier_log(log,
				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type,
				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
				   BTF_MEMBER_BIT_OFFSET(member->offset));
	else
		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type, member->offset);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

__printf(4, 5)
static void btf_verifier_log_vsi(struct btf_verifier_env *env,
				 const struct btf_type *datasec_type,
				 const struct btf_var_secinfo *vsi,
				 const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;
	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, datasec_type, NULL);

	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
			   vsi->type, vsi->offset, vsi->size);
	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

static void btf_verifier_log_hdr(struct btf_verifier_env *env,
				 u32 btf_data_size)
{
	struct bpf_verifier_log *log = &env->log;
	const struct btf *btf = env->btf;
	const struct btf_header *hdr;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL)
		return;
	hdr = &btf->hdr;
	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
	__btf_verifier_log(log, "version: %u\n", hdr->version);
	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
}

static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
{
	struct btf *btf = env->btf;

	if (btf->types_size == btf->nr_types) {
		/* Expand 'types' array */

		struct btf_type **new_types;
		u32 expand_by, new_size;

		if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
			btf_verifier_log(env, "Exceeded max num of types");
			return -E2BIG;
		}

		expand_by = max_t(u32, btf->types_size >> 2, 16);
		new_size = min_t(u32, BTF_MAX_TYPE,
				 btf->types_size + expand_by);

		new_types = kvcalloc(new_size, sizeof(*new_types),
				     GFP_KERNEL | __GFP_NOWARN);
		if (!new_types)
			return -ENOMEM;

		if (btf->nr_types == 0) {
			if (!btf->base_btf) {
				/* lazily init VOID type */
				new_types[0] = &btf_void;
				btf->nr_types++;
			}
		} else {
			memcpy(new_types, btf->types,
			       sizeof(*btf->types) * btf->nr_types);
		}

		kvfree(btf->types);
		btf->types = new_types;
		btf->types_size = new_size;
	}

	btf->types[btf->nr_types++] = t;

	return 0;
}
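
/* Growth example: expand_by = max(types_size / 4, 16), i.e. the array
 * grows by 25% (at least 16 slots) per expansion, capped at
 * BTF_MAX_TYPE: 16 -> 32 -> 48 -> 64 -> 80 -> 100 -> ...
 */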
1629 | |
1630 | static int btf_alloc_id(struct btf *btf) |
1631 | { |
1632 | int id; |
1633 | |
1634 | idr_preload(GFP_KERNEL); |
1635 | spin_lock_bh(lock: &btf_idr_lock); |
1636 | id = idr_alloc_cyclic(&btf_idr, ptr: btf, start: 1, INT_MAX, GFP_ATOMIC); |
1637 | if (id > 0) |
1638 | btf->id = id; |
1639 | spin_unlock_bh(lock: &btf_idr_lock); |
1640 | idr_preload_end(); |
1641 | |
1642 | if (WARN_ON_ONCE(!id)) |
1643 | return -ENOSPC; |
1644 | |
1645 | return id > 0 ? 0 : id; |
1646 | } |
1647 | |
1648 | static void btf_free_id(struct btf *btf) |
1649 | { |
1650 | unsigned long flags; |
1651 | |
1652 | /* |
1653 | * In map-in-map, calling map_delete_elem() on the outer |
1654 | * map will call bpf_map_put() on the inner map. |
1655 | * That will then eventually call btf_free_id() |
1656 | * on the inner map. Some map_delete_elem() |
1657 | * implementations may run with IRQs disabled, so |
1658 | * we need to use the _irqsave() version instead |
1659 | * of the _bh() version. |
1660 | */ |
1661 | spin_lock_irqsave(&btf_idr_lock, flags); |
1662 | idr_remove(&btf_idr, btf->id); |
1663 | spin_unlock_irqrestore(&btf_idr_lock, flags); |
1664 | } |
1665 | |
1666 | static void btf_free_kfunc_set_tab(struct btf *btf) |
1667 | { |
1668 | struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab; |
1669 | int hook; |
1670 | |
1671 | if (!tab) |
1672 | return; |
1673 | for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++) |
1674 | kfree(tab->sets[hook]); |
1675 | kfree(tab); |
1676 | btf->kfunc_set_tab = NULL; |
1677 | } |
1678 | |
1679 | static void btf_free_dtor_kfunc_tab(struct btf *btf) |
1680 | { |
1681 | struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab; |
1682 | |
1683 | if (!tab) |
1684 | return; |
1685 | kfree(tab); |
1686 | btf->dtor_kfunc_tab = NULL; |
1687 | } |
1688 | |
1689 | static void btf_struct_metas_free(struct btf_struct_metas *tab) |
1690 | { |
1691 | int i; |
1692 | |
1693 | if (!tab) |
1694 | return; |
1695 | for (i = 0; i < tab->cnt; i++) |
1696 | btf_record_free(tab->types[i].record); |
1697 | kfree(tab); |
1698 | } |
1699 | |
1700 | static void btf_free_struct_meta_tab(struct btf *btf) |
1701 | { |
1702 | struct btf_struct_metas *tab = btf->struct_meta_tab; |
1703 | |
1704 | btf_struct_metas_free(tab); |
1705 | btf->struct_meta_tab = NULL; |
1706 | } |
1707 | |
1708 | static void btf_free_struct_ops_tab(struct btf *btf) |
1709 | { |
1710 | struct btf_struct_ops_tab *tab = btf->struct_ops_tab; |
1711 | u32 i; |
1712 | |
1713 | if (!tab) |
1714 | return; |
1715 | |
1716 | for (i = 0; i < tab->cnt; i++) |
1717 | bpf_struct_ops_desc_release(&tab->ops[i]); |
1718 |  |
1719 | kfree(tab); |
1720 | btf->struct_ops_tab = NULL; |
1721 | } |
1722 | |
1723 | static void btf_free(struct btf *btf) |
1724 | { |
1725 | btf_free_struct_meta_tab(btf); |
1726 | btf_free_dtor_kfunc_tab(btf); |
1727 | btf_free_kfunc_set_tab(btf); |
1728 | btf_free_struct_ops_tab(btf); |
1729 | kvfree(btf->types); |
1730 | kvfree(btf->resolved_sizes); |
1731 | kvfree(btf->resolved_ids); |
1732 | /* vmlinux does not allocate btf->data, it simply points it at |
1733 | * __start_BTF. |
1734 | */ |
1735 | if (!btf_is_vmlinux(btf)) |
1736 | kvfree(btf->data); |
1737 | kvfree(btf->base_id_map); |
1738 | kfree(btf); |
1739 | } |
1740 | |
1741 | static void btf_free_rcu(struct rcu_head *rcu) |
1742 | { |
1743 | struct btf *btf = container_of(rcu, struct btf, rcu); |
1744 | |
1745 | btf_free(btf); |
1746 | } |
1747 | |
1748 | const char *btf_get_name(const struct btf *btf) |
1749 | { |
1750 | return btf->name; |
1751 | } |
1752 | |
1753 | void btf_get(struct btf *btf) |
1754 | { |
1755 | refcount_inc(&btf->refcnt); |
1756 | } |
1757 | |
1758 | void btf_put(struct btf *btf) |
1759 | { |
1760 | if (btf && refcount_dec_and_test(&btf->refcnt)) { |
1761 | btf_free_id(btf); |
1762 | call_rcu(&btf->rcu, btf_free_rcu); |
1763 | } |
1764 | } |
1765 | |
1766 | struct btf *btf_base_btf(const struct btf *btf) |
1767 | { |
1768 | return btf->base_btf; |
1769 | } |
1770 | |
1771 | const struct btf_header *btf_header(const struct btf *btf) |
1772 | { |
1773 | return &btf->hdr; |
1774 | } |
1775 | |
1776 | void btf_set_base_btf(struct btf *btf, const struct btf *base_btf) |
1777 | { |
1778 | btf->base_btf = (struct btf *)base_btf; |
1779 | btf->start_id = btf_nr_types(base_btf); |
1780 | btf->start_str_off = base_btf->hdr.str_len; |
1781 | } |
1782 | |
1783 | static int env_resolve_init(struct btf_verifier_env *env) |
1784 | { |
1785 | struct btf *btf = env->btf; |
1786 | u32 nr_types = btf->nr_types; |
1787 | u32 *resolved_sizes = NULL; |
1788 | u32 *resolved_ids = NULL; |
1789 | u8 *visit_states = NULL; |
1790 | |
1791 | resolved_sizes = kvcalloc(nr_types, sizeof(*resolved_sizes), |
1792 | GFP_KERNEL | __GFP_NOWARN); |
1793 | if (!resolved_sizes) |
1794 | goto nomem; |
1795 | |
1796 | resolved_ids = kvcalloc(nr_types, sizeof(*resolved_ids), |
1797 | GFP_KERNEL | __GFP_NOWARN); |
1798 | if (!resolved_ids) |
1799 | goto nomem; |
1800 | |
1801 | visit_states = kvcalloc(nr_types, sizeof(*visit_states), |
1802 | GFP_KERNEL | __GFP_NOWARN); |
1803 | if (!visit_states) |
1804 | goto nomem; |
1805 | |
1806 | btf->resolved_sizes = resolved_sizes; |
1807 | btf->resolved_ids = resolved_ids; |
1808 | env->visit_states = visit_states; |
1809 | |
1810 | return 0; |
1811 | |
1812 | nomem: |
1813 | kvfree(resolved_sizes); |
1814 | kvfree(resolved_ids); |
1815 | kvfree(visit_states); |
1816 | return -ENOMEM; |
1817 | } |
1818 | |
1819 | static void btf_verifier_env_free(struct btf_verifier_env *env) |
1820 | { |
1821 | kvfree(env->visit_states); |
1822 | kfree(env); |
1823 | } |
1824 | |
1825 | static bool env_type_is_resolve_sink(const struct btf_verifier_env *env, |
1826 | const struct btf_type *next_type) |
1827 | { |
1828 | switch (env->resolve_mode) { |
1829 | case RESOLVE_TBD: |
1830 | /* int, enum or void is a sink */ |
1831 | return !btf_type_needs_resolve(next_type); |
1832 | case RESOLVE_PTR: |
1833 | /* int, enum, void, struct, array, func or func_proto is a sink |
1834 | * for ptr |
1835 | */ |
1836 | return !btf_type_is_modifier(next_type) && |
1837 | !btf_type_is_ptr(next_type); |
1838 | case RESOLVE_STRUCT_OR_ARRAY: |
1839 | /* int, enum, void, ptr, func or func_proto is a sink |
1840 | * for struct and array |
1841 | */ |
1842 | return !btf_type_is_modifier(next_type) && |
1843 | !btf_type_is_array(next_type) && |
1844 | !btf_type_is_struct(next_type); |
1845 | default: |
1846 | BUG(); |
1847 | } |
1848 | } |
1849 | |
1850 | static bool env_type_is_resolved(const struct btf_verifier_env *env, |
1851 | u32 type_id) |
1852 | { |
1853 | /* base BTF types should be resolved by now */ |
1854 | if (type_id < env->btf->start_id) |
1855 | return true; |
1856 | |
1857 | return env->visit_states[type_id - env->btf->start_id] == RESOLVED; |
1858 | } |
1859 | |
1860 | static int env_stack_push(struct btf_verifier_env *env, |
1861 | const struct btf_type *t, u32 type_id) |
1862 | { |
1863 | const struct btf *btf = env->btf; |
1864 | struct resolve_vertex *v; |
1865 | |
1866 | if (env->top_stack == MAX_RESOLVE_DEPTH) |
1867 | return -E2BIG; |
1868 | |
1869 | if (type_id < btf->start_id |
1870 | || env->visit_states[type_id - btf->start_id] != NOT_VISITED) |
1871 | return -EEXIST; |
1872 | |
1873 | env->visit_states[type_id - btf->start_id] = VISITED; |
1874 | |
1875 | v = &env->stack[env->top_stack++]; |
1876 | v->t = t; |
1877 | v->type_id = type_id; |
1878 | v->next_member = 0; |
1879 | |
1880 | if (env->resolve_mode == RESOLVE_TBD) { |
1881 | if (btf_type_is_ptr(t)) |
1882 | env->resolve_mode = RESOLVE_PTR; |
1883 | else if (btf_type_is_struct(t) || btf_type_is_array(t)) |
1884 | env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY; |
1885 | } |
1886 | |
1887 | return 0; |
1888 | } |
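|  |
| /* Illustrative walk (hypothetical type ids): resolving "int **" as |
| * [3] PTR -> [2] PTR -> [1] INT pushes [3] with RESOLVE_TBD, which |
| * flips resolve_mode to RESOLVE_PTR; [2] is then pushed as well, but |
| * the INT [1] is a sink for RESOLVE_PTR and is never pushed. |
| */ |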
1889 | |
1890 | static void env_stack_set_next_member(struct btf_verifier_env *env, |
1891 | u16 next_member) |
1892 | { |
1893 | env->stack[env->top_stack - 1].next_member = next_member; |
1894 | } |
1895 | |
1896 | static void env_stack_pop_resolved(struct btf_verifier_env *env, |
1897 | u32 resolved_type_id, |
1898 | u32 resolved_size) |
1899 | { |
1900 | u32 type_id = env->stack[--(env->top_stack)].type_id; |
1901 | struct btf *btf = env->btf; |
1902 | |
1903 | type_id -= btf->start_id; /* adjust to local type id */ |
1904 | btf->resolved_sizes[type_id] = resolved_size; |
1905 | btf->resolved_ids[type_id] = resolved_type_id; |
1906 | env->visit_states[type_id] = RESOLVED; |
1907 | } |
1908 | |
1909 | static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env) |
1910 | { |
1911 | return env->top_stack ? &env->stack[env->top_stack - 1] : NULL; |
1912 | } |
1913 | |
1914 | /* Resolve the size of a passed-in "type" |
1915 | * |
1916 | * type: is an array (e.g. u32 array[x][y]) |
1917 | * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY, |
1918 | * *type_size: (x * y * sizeof(u32)). Hence, *type_size always |
1919 | * corresponds to the return type. |
1920 | * *elem_type: u32 |
1921 | * *elem_id: id of u32 |
1922 | * *total_nelems: (x * y). Hence, individual elem size is |
1923 | * (*type_size / *total_nelems) |
1924 | * *type_id: id of type if it's changed within the function, 0 if not |
1925 | * |
1926 | * type: is not an array (e.g. const struct X) |
1927 | * return type: type "struct X" |
1928 | * *type_size: sizeof(struct X) |
1929 | * *elem_type: same as return type ("struct X") |
1930 | * *elem_id: 0 |
1931 | * *total_nelems: 1 |
1932 | * *type_id: id of type if it's changed within the function, 0 if not |
1933 | */ |
1934 | static const struct btf_type * |
1935 | __btf_resolve_size(const struct btf *btf, const struct btf_type *type, |
1936 | u32 *type_size, const struct btf_type **elem_type, |
1937 | u32 *elem_id, u32 *total_nelems, u32 *type_id) |
1938 | { |
1939 | const struct btf_type *array_type = NULL; |
1940 | const struct btf_array *array = NULL; |
1941 | u32 i, size, nelems = 1, id = 0; |
1942 | |
1943 | for (i = 0; i < MAX_RESOLVE_DEPTH; i++) { |
1944 | switch (BTF_INFO_KIND(type->info)) { |
1945 | /* type->size can be used */ |
1946 | case BTF_KIND_INT: |
1947 | case BTF_KIND_STRUCT: |
1948 | case BTF_KIND_UNION: |
1949 | case BTF_KIND_ENUM: |
1950 | case BTF_KIND_FLOAT: |
1951 | case BTF_KIND_ENUM64: |
1952 | size = type->size; |
1953 | goto resolved; |
1954 | |
1955 | case BTF_KIND_PTR: |
1956 | size = sizeof(void *); |
1957 | goto resolved; |
1958 | |
1959 | /* Modifiers */ |
1960 | case BTF_KIND_TYPEDEF: |
1961 | case BTF_KIND_VOLATILE: |
1962 | case BTF_KIND_CONST: |
1963 | case BTF_KIND_RESTRICT: |
1964 | case BTF_KIND_TYPE_TAG: |
1965 | id = type->type; |
1966 | type = btf_type_by_id(btf, type->type); |
1967 | break; |
1968 | |
1969 | case BTF_KIND_ARRAY: |
1970 | if (!array_type) |
1971 | array_type = type; |
1972 | array = btf_type_array(type); |
1973 | if (nelems && array->nelems > U32_MAX / nelems) |
1974 | return ERR_PTR(-EINVAL); |
1975 | nelems *= array->nelems; |
1976 | type = btf_type_by_id(btf, array->type); |
1977 | break; |
1978 | |
1979 | /* type without size */ |
1980 | default: |
1981 | return ERR_PTR(-EINVAL); |
1982 | } |
1983 | } |
1984 | |
1985 | return ERR_PTR(-EINVAL); |
1986 | |
1987 | resolved: |
1988 | if (nelems && size > U32_MAX / nelems) |
1989 | return ERR_PTR(-EINVAL); |
1990 | |
1991 | *type_size = nelems * size; |
1992 | if (total_nelems) |
1993 | *total_nelems = nelems; |
1994 | if (elem_type) |
1995 | *elem_type = type; |
1996 | if (elem_id) |
1997 | *elem_id = array ? array->type : 0; |
1998 | if (type_id && id) |
1999 | *type_id = id; |
2000 | |
2001 | return array_type ? : type; |
2002 | } |
2003 | |
2004 | const struct btf_type * |
2005 | btf_resolve_size(const struct btf *btf, const struct btf_type *type, |
2006 | u32 *type_size) |
2007 | { |
2008 | return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL); |
2009 | } |
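|  |
| /* Worked example for the contract documented above (hypothetical ids): |
| * |
| *   [1] INT   u32 (size=4) |
| *   [2] ARRAY elem=[1] nelems=3 |
| *   [3] ARRAY elem=[2] nelems=2 |
| *   [4] CONST [3] |
| * |
| * __btf_resolve_size() on [4] ("const u32 [2][3]") returns [3], with |
| * *type_size = 2 * 3 * 4 = 24, *elem_type = [1], *elem_id = 1, |
| * *total_nelems = 6 and *type_id = 3 (the id reached after the |
| * modifier was skipped). |
| */ |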
2010 | |
2011 | static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id) |
2012 | { |
2013 | while (type_id < btf->start_id) |
2014 | btf = btf->base_btf; |
2015 | |
2016 | return btf->resolved_ids[type_id - btf->start_id]; |
2017 | } |
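|  |
| /* With split BTF, type_ids below btf->start_id belong to a base BTF. |
| * E.g. (hypothetical numbers) a kernel module BTF with start_id 100000 |
| * delegates ids 1..99999 to the vmlinux BTF, whose own start_id is 1, |
| * so the loop above always indexes the resolved_ids[] of the BTF that |
| * actually owns the type. |
| */ |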
2018 | |
2019 | /* The input param "type_id" must point to a needs_resolve type */ |
2020 | static const struct btf_type *btf_type_id_resolve(const struct btf *btf, |
2021 | u32 *type_id) |
2022 | { |
2023 | *type_id = btf_resolved_type_id(btf, *type_id); |
2024 | return btf_type_by_id(btf, *type_id); |
2025 | } |
2026 | |
2027 | static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id) |
2028 | { |
2029 | while (type_id < btf->start_id) |
2030 | btf = btf->base_btf; |
2031 | |
2032 | return btf->resolved_sizes[type_id - btf->start_id]; |
2033 | } |
2034 | |
2035 | const struct btf_type *btf_type_id_size(const struct btf *btf, |
2036 | u32 *type_id, u32 *ret_size) |
2037 | { |
2038 | const struct btf_type *size_type; |
2039 | u32 size_type_id = *type_id; |
2040 | u32 size = 0; |
2041 | |
2042 | size_type = btf_type_by_id(btf, size_type_id); |
2043 | if (btf_type_nosize_or_null(size_type)) |
2044 | return NULL; |
2045 |  |
2046 | if (btf_type_has_size(size_type)) { |
2047 | size = size_type->size; |
2048 | } else if (btf_type_is_array(size_type)) { |
2049 | size = btf_resolved_type_size(btf, size_type_id); |
2050 | } else if (btf_type_is_ptr(size_type)) { |
2051 | size = sizeof(void *); |
2052 | } else { |
2053 | if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) && |
2054 | !btf_type_is_var(size_type))) |
2055 | return NULL; |
2056 |  |
2057 | size_type_id = btf_resolved_type_id(btf, size_type_id); |
2058 | size_type = btf_type_by_id(btf, size_type_id); |
2059 | if (btf_type_nosize_or_null(size_type)) |
2060 | return NULL; |
2061 | else if (btf_type_has_size(size_type)) |
2062 | size = size_type->size; |
2063 | else if (btf_type_is_array(size_type)) |
2064 | size = btf_resolved_type_size(btf, size_type_id); |
2065 | else if (btf_type_is_ptr(size_type)) |
2066 | size = sizeof(void *); |
2067 | else |
2068 | return NULL; |
2069 | } |
2070 | |
2071 | *type_id = size_type_id; |
2072 | if (ret_size) |
2073 | *ret_size = size; |
2074 | |
2075 | return size_type; |
2076 | } |
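|  |
| /* Example of the id rewriting above (hypothetical ids): for a member of |
| * type [5] CONST -> [2] INT(u32), *type_id is rewritten from 5 to 2 and |
| * *ret_size becomes 4; for a PTR member both the id and the size |
| * (sizeof(void *)) are taken from the PTR type itself. |
| */ |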
2077 | |
2078 | static int btf_df_check_member(struct btf_verifier_env *env, |
2079 | const struct btf_type *struct_type, |
2080 | const struct btf_member *member, |
2081 | const struct btf_type *member_type) |
2082 | { |
2083 | btf_verifier_log_basic(env, struct_type, |
2084 | "Unsupported check_member"); |
2085 | return -EINVAL; |
2086 | } |
2087 | |
2088 | static int btf_df_check_kflag_member(struct btf_verifier_env *env, |
2089 | const struct btf_type *struct_type, |
2090 | const struct btf_member *member, |
2091 | const struct btf_type *member_type) |
2092 | { |
2093 | btf_verifier_log_basic(env, struct_type, |
2094 | "Unsupported check_kflag_member"); |
2095 | return -EINVAL; |
2096 | } |
2097 | |
2098 | /* Used for ptr, array struct/union and float type members. |
2099 | * int, enum and modifier types have their specific callback functions. |
2100 | */ |
2101 | static int btf_generic_check_kflag_member(struct btf_verifier_env *env, |
2102 | const struct btf_type *struct_type, |
2103 | const struct btf_member *member, |
2104 | const struct btf_type *member_type) |
2105 | { |
2106 | if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) { |
2107 | btf_verifier_log_member(env, struct_type, member, |
2108 | "Invalid member bitfield_size"); |
2109 | return -EINVAL; |
2110 | } |
2111 | |
2112 | /* bitfield size is 0, so member->offset represents the bit offset |
2113 | * only. It is safe to call the non-kflag check_member variants. |
2114 | */ |
2115 | return btf_type_ops(member_type)->check_member(env, struct_type, |
2116 | member, |
2117 | member_type); |
2118 | } |
2119 | |
2120 | static int btf_df_resolve(struct btf_verifier_env *env, |
2121 | const struct resolve_vertex *v) |
2122 | { |
2123 | btf_verifier_log_basic(env, v->t, "Unsupported resolve"); |
2124 | return -EINVAL; |
2125 | } |
2126 | |
2127 | static void btf_df_show(const struct btf *btf, const struct btf_type *t, |
2128 | u32 type_id, void *data, u8 bits_offsets, |
2129 | struct btf_show *show) |
2130 | { |
2131 | btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info)); |
2132 | } |
2133 | |
2134 | static int btf_int_check_member(struct btf_verifier_env *env, |
2135 | const struct btf_type *struct_type, |
2136 | const struct btf_member *member, |
2137 | const struct btf_type *member_type) |
2138 | { |
2139 | u32 int_data = btf_type_int(member_type); |
2140 | u32 struct_bits_off = member->offset; |
2141 | u32 struct_size = struct_type->size; |
2142 | u32 nr_copy_bits; |
2143 | u32 bytes_offset; |
2144 | |
2145 | if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) { |
2146 | btf_verifier_log_member(env, struct_type, member, |
2147 | "bits_offset exceeds U32_MAX"); |
2148 | return -EINVAL; |
2149 | } |
2150 | |
2151 | struct_bits_off += BTF_INT_OFFSET(int_data); |
2152 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
2153 | nr_copy_bits = BTF_INT_BITS(int_data) + |
2154 | BITS_PER_BYTE_MASKED(struct_bits_off); |
2155 | |
2156 | if (nr_copy_bits > BITS_PER_U128) { |
2157 | btf_verifier_log_member(env, struct_type, member, |
2158 | "nr_copy_bits exceeds 128"); |
2159 | return -EINVAL; |
2160 | } |
2161 | |
2162 | if (struct_size < bytes_offset || |
2163 | struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { |
2164 | btf_verifier_log_member(env, struct_type, member, |
2165 | "Member exceeds struct_size"); |
2166 | return -EINVAL; |
2167 | } |
2168 | |
2169 | return 0; |
2170 | } |
2171 | |
2172 | static int btf_int_check_kflag_member(struct btf_verifier_env *env, |
2173 | const struct btf_type *struct_type, |
2174 | const struct btf_member *member, |
2175 | const struct btf_type *member_type) |
2176 | { |
2177 | u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset; |
2178 | u32 int_data = btf_type_int(member_type); |
2179 | u32 struct_size = struct_type->size; |
2180 | u32 nr_copy_bits; |
2181 |  |
2182 | /* a regular int type is required for the kflag int member */ |
2183 | if (!btf_type_int_is_regular(member_type)) { |
2184 | btf_verifier_log_member(env, struct_type, member, |
2185 | "Invalid member base type"); |
2186 | return -EINVAL; |
2187 | } |
2188 | |
2189 | /* check sanity of bitfield size */ |
2190 | nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); |
2191 | struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); |
2192 | nr_int_data_bits = BTF_INT_BITS(int_data); |
2193 | if (!nr_bits) { |
2194 | /* Not a bitfield member, member offset must be at byte |
2195 | * boundary. |
2196 | */ |
2197 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
2198 | btf_verifier_log_member(env, struct_type, member, |
2199 | "Invalid member offset"); |
2200 | return -EINVAL; |
2201 | } |
2202 | |
2203 | nr_bits = nr_int_data_bits; |
2204 | } else if (nr_bits > nr_int_data_bits) { |
2205 | btf_verifier_log_member(env, struct_type, member, |
2206 | "Invalid member bitfield_size"); |
2207 | return -EINVAL; |
2208 | } |
2209 | |
2210 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
2211 | nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off); |
2212 | if (nr_copy_bits > BITS_PER_U128) { |
2213 | btf_verifier_log_member(env, struct_type, member, |
2214 | "nr_copy_bits exceeds 128"); |
2215 | return -EINVAL; |
2216 | } |
2217 | |
2218 | if (struct_size < bytes_offset || |
2219 | struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { |
2220 | btf_verifier_log_member(env, struct_type, member, |
2221 | "Member exceeds struct_size"); |
2222 | return -EINVAL; |
2223 | } |
2224 | |
2225 | return 0; |
2226 | } |
2227 | |
2228 | static s32 btf_int_check_meta(struct btf_verifier_env *env, |
2229 | const struct btf_type *t, |
2230 | u32 meta_left) |
2231 | { |
2232 | u32 int_data, nr_bits, meta_needed = sizeof(int_data); |
2233 | u16 encoding; |
2234 | |
2235 | if (meta_left < meta_needed) { |
2236 | btf_verifier_log_basic(env, t, |
2237 | "meta_left:%u meta_needed:%u", |
2238 | meta_left, meta_needed); |
2239 | return -EINVAL; |
2240 | } |
2241 | |
2242 | if (btf_type_vlen(t)) { |
2243 | btf_verifier_log_type(env, t, "vlen != 0"); |
2244 | return -EINVAL; |
2245 | } |
2246 | |
2247 | if (btf_type_kflag(t)) { |
2248 | btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); |
2249 | return -EINVAL; |
2250 | } |
2251 | |
2252 | int_data = btf_type_int(t); |
2253 | if (int_data & ~BTF_INT_MASK) { |
2254 | btf_verifier_log_basic(env, t, "Invalid int_data:%x", |
2255 | int_data); |
2256 | return -EINVAL; |
2257 | } |
2258 | |
2259 | nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data); |
2260 | |
2261 | if (nr_bits > BITS_PER_U128) { |
2262 | btf_verifier_log_type(env, t, "nr_bits exceeds %zu", |
2263 | BITS_PER_U128); |
2264 | return -EINVAL; |
2265 | } |
2266 | |
2267 | if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) { |
2268 | btf_verifier_log_type(env, t, "nr_bits exceeds type_size"); |
2269 | return -EINVAL; |
2270 | } |
2271 | |
2272 | /* |
2273 | * Only one of the encoding bits is allowed and it |
2274 | * should be sufficient for the pretty print purpose (i.e. decoding). |
2275 | * Multiple bits can be allowed later if it is found |
2276 | * to be insufficient. |
2277 | */ |
2278 | encoding = BTF_INT_ENCODING(int_data); |
2279 | if (encoding && |
2280 | encoding != BTF_INT_SIGNED && |
2281 | encoding != BTF_INT_CHAR && |
2282 | encoding != BTF_INT_BOOL) { |
2283 | btf_verifier_log_type(env, t, "Unsupported encoding"); |
2284 | return -ENOTSUPP; |
2285 | } |
2286 | |
2287 | btf_verifier_log_type(env, t, NULL); |
2288 | |
2289 | return meta_needed; |
2290 | } |
2291 | |
2292 | static void btf_int_log(struct btf_verifier_env *env, |
2293 | const struct btf_type *t) |
2294 | { |
2295 | int int_data = btf_type_int(t); |
2296 | |
2297 | btf_verifier_log(env, |
2298 | "size=%u bits_offset=%u nr_bits=%u encoding=%s", |
2299 | t->size, BTF_INT_OFFSET(int_data), |
2300 | BTF_INT_BITS(int_data), |
2301 | btf_int_encoding_str(BTF_INT_ENCODING(int_data))); |
2302 | } |
2303 | |
2304 | static void btf_int128_print(struct btf_show *show, void *data) |
2305 | { |
2306 | /* data points to a __int128 number. |
2307 | * Suppose |
2308 | * int128_num = *(__int128 *)data; |
2309 | * The below formulas shows what upper_num and lower_num represents: |
2310 | * upper_num = int128_num >> 64; |
2311 | * lower_num = int128_num & 0xffffffffFFFFFFFFULL; |
2312 | */ |
2313 | u64 upper_num, lower_num; |
2314 | |
2315 | #ifdef __BIG_ENDIAN_BITFIELD |
2316 | upper_num = *(u64 *)data; |
2317 | lower_num = *(u64 *)(data + 8); |
2318 | #else |
2319 | upper_num = *(u64 *)(data + 8); |
2320 | lower_num = *(u64 *)data; |
2321 | #endif |
2322 | if (upper_num == 0) |
2323 | btf_show_type_value(show, "0x%llx", lower_num); |
2324 | else |
2325 | btf_show_type_values(show, "0x%llx%016llx", upper_num, |
2326 | lower_num); |
2327 | } |
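|  |
| /* E.g. the __int128 value (1 << 64) + 2 has upper_num = 1 and |
| * lower_num = 2 and prints as "0x10000000000000002": the lower half is |
| * zero-padded to 16 hex digits so no digits are dropped. |
| */ |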
2328 | |
2329 | static void btf_int128_shift(u64 *print_num, u16 left_shift_bits, |
2330 | u16 right_shift_bits) |
2331 | { |
2332 | u64 upper_num, lower_num; |
2333 | |
2334 | #ifdef __BIG_ENDIAN_BITFIELD |
2335 | upper_num = print_num[0]; |
2336 | lower_num = print_num[1]; |
2337 | #else |
2338 | upper_num = print_num[1]; |
2339 | lower_num = print_num[0]; |
2340 | #endif |
2341 | |
2342 | /* shake out unneeded bits by shift/or operations */ |
2343 | if (left_shift_bits >= 64) { |
2344 | upper_num = lower_num << (left_shift_bits - 64); |
2345 | lower_num = 0; |
2346 | } else { |
2347 | upper_num = (upper_num << left_shift_bits) | |
2348 | (lower_num >> (64 - left_shift_bits)); |
2349 | lower_num = lower_num << left_shift_bits; |
2350 | } |
2351 | |
2352 | if (right_shift_bits >= 64) { |
2353 | lower_num = upper_num >> (right_shift_bits - 64); |
2354 | upper_num = 0; |
2355 | } else { |
2356 | lower_num = (lower_num >> right_shift_bits) | |
2357 | (upper_num << (64 - right_shift_bits)); |
2358 | upper_num = upper_num >> right_shift_bits; |
2359 | } |
2360 | |
2361 | #ifdef __BIG_ENDIAN_BITFIELD |
2362 | print_num[0] = upper_num; |
2363 | print_num[1] = lower_num; |
2364 | #else |
2365 | print_num[0] = lower_num; |
2366 | print_num[1] = upper_num; |
2367 | #endif |
2368 | } |
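|  |
| /* Worked example (little endian): extracting a 3-bit field that starts |
| * at bit offset 5 copies nr_copy_bits = 8, so btf_bitfield_show() below |
| * calls this with left_shift_bits = 128 - 8 = 120 and right_shift_bits |
| * = 128 - 3 = 125: the field is first shifted up against bit 127 and |
| * then back down to bit 0, discarding everything else on the way. |
| */ |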
2369 | |
2370 | static void btf_bitfield_show(void *data, u8 bits_offset, |
2371 | u8 nr_bits, struct btf_show *show) |
2372 | { |
2373 | u16 left_shift_bits, right_shift_bits; |
2374 | u8 nr_copy_bytes; |
2375 | u8 nr_copy_bits; |
2376 | u64 print_num[2] = {}; |
2377 | |
2378 | nr_copy_bits = nr_bits + bits_offset; |
2379 | nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); |
2380 | |
2381 | memcpy(print_num, data, nr_copy_bytes); |
2382 | |
2383 | #ifdef __BIG_ENDIAN_BITFIELD |
2384 | left_shift_bits = bits_offset; |
2385 | #else |
2386 | left_shift_bits = BITS_PER_U128 - nr_copy_bits; |
2387 | #endif |
2388 | right_shift_bits = BITS_PER_U128 - nr_bits; |
2389 | |
2390 | btf_int128_shift(print_num, left_shift_bits, right_shift_bits); |
2391 | btf_int128_print(show, print_num); |
2392 | } |
2393 | |
2394 | |
2395 | static void btf_int_bits_show(const struct btf *btf, |
2396 | const struct btf_type *t, |
2397 | void *data, u8 bits_offset, |
2398 | struct btf_show *show) |
2399 | { |
2400 | u32 int_data = btf_type_int(t); |
2401 | u8 nr_bits = BTF_INT_BITS(int_data); |
2402 | u8 total_bits_offset; |
2403 | |
2404 | /* |
2405 | * bits_offset is at most 7. |
2406 | * BTF_INT_OFFSET() cannot exceed 128 bits. |
2407 | */ |
2408 | total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); |
2409 | data += BITS_ROUNDDOWN_BYTES(total_bits_offset); |
2410 | bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset); |
2411 | btf_bitfield_show(data, bits_offset, nr_bits, show); |
2412 | } |
2413 | |
2414 | static void btf_int_show(const struct btf *btf, const struct btf_type *t, |
2415 | u32 type_id, void *data, u8 bits_offset, |
2416 | struct btf_show *show) |
2417 | { |
2418 | u32 int_data = btf_type_int(t); |
2419 | u8 encoding = BTF_INT_ENCODING(int_data); |
2420 | bool sign = encoding & BTF_INT_SIGNED; |
2421 | u8 nr_bits = BTF_INT_BITS(int_data); |
2422 | void *safe_data; |
2423 | |
2424 | safe_data = btf_show_start_type(show, t, type_id, data); |
2425 | if (!safe_data) |
2426 | return; |
2427 | |
2428 | if (bits_offset || BTF_INT_OFFSET(int_data) || |
2429 | BITS_PER_BYTE_MASKED(nr_bits)) { |
2430 | btf_int_bits_show(btf, t, safe_data, bits_offset, show); |
2431 | goto out; |
2432 | } |
2433 | |
2434 | switch (nr_bits) { |
2435 | case 128: |
2436 | btf_int128_print(show, safe_data); |
2437 | break; |
2438 | case 64: |
2439 | if (sign) |
2440 | btf_show_type_value(show, "%lld", *(s64 *)safe_data); |
2441 | else |
2442 | btf_show_type_value(show, "%llu", *(u64 *)safe_data); |
2443 | break; |
2444 | case 32: |
2445 | if (sign) |
2446 | btf_show_type_value(show, "%d", *(s32 *)safe_data); |
2447 | else |
2448 | btf_show_type_value(show, "%u", *(u32 *)safe_data); |
2449 | break; |
2450 | case 16: |
2451 | if (sign) |
2452 | btf_show_type_value(show, "%d", *(s16 *)safe_data); |
2453 | else |
2454 | btf_show_type_value(show, "%u", *(u16 *)safe_data); |
2455 | break; |
2456 | case 8: |
2457 | if (show->state.array_encoding == BTF_INT_CHAR) { |
2458 | /* check for null terminator */ |
2459 | if (show->state.array_terminated) |
2460 | break; |
2461 | if (*(char *)data == '\0') { |
2462 | show->state.array_terminated = 1; |
2463 | break; |
2464 | } |
2465 | if (isprint(*(char *)data)) { |
2466 | btf_show_type_value(show, "'%c'", |
2467 | *(char *)safe_data); |
2468 | break; |
2469 | } |
2470 | } |
2471 | if (sign) |
2472 | btf_show_type_value(show, "%d", *(s8 *)safe_data); |
2473 | else |
2474 | btf_show_type_value(show, "%u", *(u8 *)safe_data); |
2475 | break; |
2476 | default: |
2477 | btf_int_bits_show(btf, t, safe_data, bits_offset, show); |
2478 | break; |
2479 | } |
2480 | out: |
2481 | btf_show_end_type(show); |
2482 | } |
2483 | |
2484 | static const struct btf_kind_operations int_ops = { |
2485 | .check_meta = btf_int_check_meta, |
2486 | .resolve = btf_df_resolve, |
2487 | .check_member = btf_int_check_member, |
2488 | .check_kflag_member = btf_int_check_kflag_member, |
2489 | .log_details = btf_int_log, |
2490 | .show = btf_int_show, |
2491 | }; |
2492 | |
2493 | static int btf_modifier_check_member(struct btf_verifier_env *env, |
2494 | const struct btf_type *struct_type, |
2495 | const struct btf_member *member, |
2496 | const struct btf_type *member_type) |
2497 | { |
2498 | const struct btf_type *resolved_type; |
2499 | u32 resolved_type_id = member->type; |
2500 | struct btf_member resolved_member; |
2501 | struct btf *btf = env->btf; |
2502 | |
2503 | resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL); |
2504 | if (!resolved_type) { |
2505 | btf_verifier_log_member(env, struct_type, member, |
2506 | "Invalid member"); |
2507 | return -EINVAL; |
2508 | } |
2509 | |
2510 | resolved_member = *member; |
2511 | resolved_member.type = resolved_type_id; |
2512 | |
2513 | return btf_type_ops(resolved_type)->check_member(env, struct_type, |
2514 | &resolved_member, |
2515 | resolved_type); |
2516 | } |
2517 | |
2518 | static int btf_modifier_check_kflag_member(struct btf_verifier_env *env, |
2519 | const struct btf_type *struct_type, |
2520 | const struct btf_member *member, |
2521 | const struct btf_type *member_type) |
2522 | { |
2523 | const struct btf_type *resolved_type; |
2524 | u32 resolved_type_id = member->type; |
2525 | struct btf_member resolved_member; |
2526 | struct btf *btf = env->btf; |
2527 | |
2528 | resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL); |
2529 | if (!resolved_type) { |
2530 | btf_verifier_log_member(env, struct_type, member, |
2531 | "Invalid member"); |
2532 | return -EINVAL; |
2533 | } |
2534 | |
2535 | resolved_member = *member; |
2536 | resolved_member.type = resolved_type_id; |
2537 | |
2538 | return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type, |
2539 | &resolved_member, |
2540 | resolved_type); |
2541 | } |
2542 | |
2543 | static int btf_ptr_check_member(struct btf_verifier_env *env, |
2544 | const struct btf_type *struct_type, |
2545 | const struct btf_member *member, |
2546 | const struct btf_type *member_type) |
2547 | { |
2548 | u32 struct_size, struct_bits_off, bytes_offset; |
2549 | |
2550 | struct_size = struct_type->size; |
2551 | struct_bits_off = member->offset; |
2552 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
2553 | |
2554 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
2555 | btf_verifier_log_member(env, struct_type, member, |
2556 | "Member is not byte aligned"); |
2557 | return -EINVAL; |
2558 | } |
2559 | |
2560 | if (struct_size - bytes_offset < sizeof(void *)) { |
2561 | btf_verifier_log_member(env, struct_type, member, |
2562 | "Member exceeds struct_size"); |
2563 | return -EINVAL; |
2564 | } |
2565 | |
2566 | return 0; |
2567 | } |
2568 | |
2569 | static int btf_ref_type_check_meta(struct btf_verifier_env *env, |
2570 | const struct btf_type *t, |
2571 | u32 meta_left) |
2572 | { |
2573 | const char *value; |
2574 | |
2575 | if (btf_type_vlen(t)) { |
2576 | btf_verifier_log_type(env, t, "vlen != 0"); |
2577 | return -EINVAL; |
2578 | } |
2579 | |
2580 | if (btf_type_kflag(t) && !btf_type_is_type_tag(t)) { |
2581 | btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); |
2582 | return -EINVAL; |
2583 | } |
2584 | |
2585 | if (!BTF_TYPE_ID_VALID(t->type)) { |
2586 | btf_verifier_log_type(env, t, "Invalid type_id"); |
2587 | return -EINVAL; |
2588 | } |
2589 | |
2590 | /* typedef/type_tag types must have a valid name, while the other |
2591 | * ref types (volatile, const, restrict) must have a null name. |
2592 | */ |
2593 | if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) { |
2594 | if (!t->name_off || |
2595 | !btf_name_valid_identifier(env->btf, t->name_off)) { |
2596 | btf_verifier_log_type(env, t, "Invalid name"); |
2597 | return -EINVAL; |
2598 | } |
2599 | } else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) { |
2600 | value = btf_name_by_offset(env->btf, t->name_off); |
2601 | if (!value || !value[0]) { |
2602 | btf_verifier_log_type(env, t, "Invalid name"); |
2603 | return -EINVAL; |
2604 | } |
2605 | } else { |
2606 | if (t->name_off) { |
2607 | btf_verifier_log_type(env, t, "Invalid name"); |
2608 | return -EINVAL; |
2609 | } |
2610 | } |
2611 | |
2612 | btf_verifier_log_type(env, t, NULL); |
2613 | |
2614 | return 0; |
2615 | } |
2616 | |
2617 | static int btf_modifier_resolve(struct btf_verifier_env *env, |
2618 | const struct resolve_vertex *v) |
2619 | { |
2620 | const struct btf_type *t = v->t; |
2621 | const struct btf_type *next_type; |
2622 | u32 next_type_id = t->type; |
2623 | struct btf *btf = env->btf; |
2624 | |
2625 | next_type = btf_type_by_id(btf, next_type_id); |
2626 | if (!next_type || btf_type_is_resolve_source_only(next_type)) { |
2627 | btf_verifier_log_type(env, v->t, "Invalid type_id"); |
2628 | return -EINVAL; |
2629 | } |
2630 |  |
2631 | if (!env_type_is_resolve_sink(env, next_type) && |
2632 | !env_type_is_resolved(env, next_type_id)) |
2633 | return env_stack_push(env, next_type, next_type_id); |
2634 | |
2635 | /* Figure out the resolved next_type_id with size. |
2636 | * They will be stored in the current modifier's |
2637 | * resolved_ids and resolved_sizes such that they can |
2638 | * save us a few type-following steps when used later (e.g. in |
2639 | * pretty print). |
2640 | */ |
2641 | if (!btf_type_id_size(btf, &next_type_id, NULL)) { |
2642 | if (env_type_is_resolved(env, next_type_id)) |
2643 | next_type = btf_type_id_resolve(btf, &next_type_id); |
2644 |  |
2645 | /* "typedef void new_void", "const void"...etc */ |
2646 | if (!btf_type_is_void(next_type) && |
2647 | !btf_type_is_fwd(next_type) && |
2648 | !btf_type_is_func_proto(next_type)) { |
2649 | btf_verifier_log_type(env, v->t, "Invalid type_id"); |
2650 | return -EINVAL; |
2651 | } |
2652 | } |
2653 |  |
2654 | env_stack_pop_resolved(env, next_type_id, 0); |
2655 | |
2656 | return 0; |
2657 | } |
2658 | |
2659 | static int btf_var_resolve(struct btf_verifier_env *env, |
2660 | const struct resolve_vertex *v) |
2661 | { |
2662 | const struct btf_type *next_type; |
2663 | const struct btf_type *t = v->t; |
2664 | u32 next_type_id = t->type; |
2665 | struct btf *btf = env->btf; |
2666 | |
2667 | next_type = btf_type_by_id(btf, next_type_id); |
2668 | if (!next_type || btf_type_is_resolve_source_only(next_type)) { |
2669 | btf_verifier_log_type(env, v->t, "Invalid type_id"); |
2670 | return -EINVAL; |
2671 | } |
2672 |  |
2673 | if (!env_type_is_resolve_sink(env, next_type) && |
2674 | !env_type_is_resolved(env, next_type_id)) |
2675 | return env_stack_push(env, next_type, next_type_id); |
2676 |  |
2677 | if (btf_type_is_modifier(next_type)) { |
2678 | const struct btf_type *resolved_type; |
2679 | u32 resolved_type_id; |
2680 |  |
2681 | resolved_type_id = next_type_id; |
2682 | resolved_type = btf_type_id_resolve(btf, &resolved_type_id); |
2683 |  |
2684 | if (btf_type_is_ptr(resolved_type) && |
2685 | !env_type_is_resolve_sink(env, resolved_type) && |
2686 | !env_type_is_resolved(env, resolved_type_id)) |
2687 | return env_stack_push(env, resolved_type, |
2688 | resolved_type_id); |
2689 | } |
2690 |  |
2691 | /* We must resolve to something concrete at this point; no |
2692 | * forward types or anything else that would resolve to a size |
2693 | * of zero is allowed. |
2694 | */ |
2695 | if (!btf_type_id_size(btf, &next_type_id, NULL)) { |
2696 | btf_verifier_log_type(env, v->t, "Invalid type_id"); |
2697 | return -EINVAL; |
2698 | } |
2699 |  |
2700 | env_stack_pop_resolved(env, next_type_id, 0); |
2701 | |
2702 | return 0; |
2703 | } |
2704 | |
2705 | static int btf_ptr_resolve(struct btf_verifier_env *env, |
2706 | const struct resolve_vertex *v) |
2707 | { |
2708 | const struct btf_type *next_type; |
2709 | const struct btf_type *t = v->t; |
2710 | u32 next_type_id = t->type; |
2711 | struct btf *btf = env->btf; |
2712 | |
2713 | next_type = btf_type_by_id(btf, next_type_id); |
2714 | if (!next_type || btf_type_is_resolve_source_only(next_type)) { |
2715 | btf_verifier_log_type(env, v->t, "Invalid type_id"); |
2716 | return -EINVAL; |
2717 | } |
2718 |  |
2719 | if (!env_type_is_resolve_sink(env, next_type) && |
2720 | !env_type_is_resolved(env, next_type_id)) |
2721 | return env_stack_push(env, next_type, next_type_id); |
2722 |  |
2723 | /* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY, |
2724 | * the modifier may have stopped resolving when it was resolved |
2725 | * to a ptr (last-resolved-ptr). |
2726 | * |
2727 | * We now need to continue from the last-resolved-ptr to |
2728 | * ensure the last-resolved-ptr does not refer back to |
2729 | * the current ptr (t). |
2730 | */ |
2731 | if (btf_type_is_modifier(next_type)) { |
2732 | const struct btf_type *resolved_type; |
2733 | u32 resolved_type_id; |
2734 |  |
2735 | resolved_type_id = next_type_id; |
2736 | resolved_type = btf_type_id_resolve(btf, &resolved_type_id); |
2737 |  |
2738 | if (btf_type_is_ptr(resolved_type) && |
2739 | !env_type_is_resolve_sink(env, resolved_type) && |
2740 | !env_type_is_resolved(env, resolved_type_id)) |
2741 | return env_stack_push(env, resolved_type, |
2742 | resolved_type_id); |
2743 | } |
2744 |  |
2745 | if (!btf_type_id_size(btf, &next_type_id, NULL)) { |
2746 | if (env_type_is_resolved(env, next_type_id)) |
2747 | next_type = btf_type_id_resolve(btf, &next_type_id); |
2748 |  |
2749 | if (!btf_type_is_void(next_type) && |
2750 | !btf_type_is_fwd(next_type) && |
2751 | !btf_type_is_func_proto(next_type)) { |
2752 | btf_verifier_log_type(env, v->t, "Invalid type_id"); |
2753 | return -EINVAL; |
2754 | } |
2755 | } |
2756 |  |
2757 | env_stack_pop_resolved(env, next_type_id, 0); |
2758 | |
2759 | return 0; |
2760 | } |
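|  |
| /* Hypothetical loop the last-resolved-ptr handling above catches: |
| * |
| *   [1] ARRAY elem=[2] |
| *   [2] CONST -> [3] |
| *   [3] PTR   -> [2] |
| * |
| * While [1] is resolved, the modifier [2] stops at the ptr [3] (a sink |
| * for RESOLVE_STRUCT_OR_ARRAY). When [3] is resolved later, its next |
| * type [2] resolves to [3] itself, and env_stack_push() fails with |
| * -EEXIST instead of looping forever. |
| */ |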
2761 | |
2762 | static void btf_modifier_show(const struct btf *btf, |
2763 | const struct btf_type *t, |
2764 | u32 type_id, void *data, |
2765 | u8 bits_offset, struct btf_show *show) |
2766 | { |
2767 | if (btf->resolved_ids) |
2768 | t = btf_type_id_resolve(btf, &type_id); |
2769 | else |
2770 | t = btf_type_skip_modifiers(btf, type_id, NULL); |
2771 | |
2772 | btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show); |
2773 | } |
2774 | |
2775 | static void btf_var_show(const struct btf *btf, const struct btf_type *t, |
2776 | u32 type_id, void *data, u8 bits_offset, |
2777 | struct btf_show *show) |
2778 | { |
2779 | t = btf_type_id_resolve(btf, &type_id); |
2780 | |
2781 | btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show); |
2782 | } |
2783 | |
2784 | static void btf_ptr_show(const struct btf *btf, const struct btf_type *t, |
2785 | u32 type_id, void *data, u8 bits_offset, |
2786 | struct btf_show *show) |
2787 | { |
2788 | void *safe_data; |
2789 | |
2790 | safe_data = btf_show_start_type(show, t, type_id, data); |
2791 | if (!safe_data) |
2792 | return; |
2793 | |
2794 | /* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */ |
2795 | if (show->flags & BTF_SHOW_PTR_RAW) |
2796 | btf_show_type_value(show, "0x%px", *(void **)safe_data); |
2797 | else |
2798 | btf_show_type_value(show, "0x%p", *(void **)safe_data); |
2799 | btf_show_end_type(show); |
2800 | } |
2801 | |
2802 | static void btf_ref_type_log(struct btf_verifier_env *env, |
2803 | const struct btf_type *t) |
2804 | { |
2805 | btf_verifier_log(env, "type_id=%u", t->type); |
2806 | } |
2807 | |
2808 | static const struct btf_kind_operations modifier_ops = { |
2809 | .check_meta = btf_ref_type_check_meta, |
2810 | .resolve = btf_modifier_resolve, |
2811 | .check_member = btf_modifier_check_member, |
2812 | .check_kflag_member = btf_modifier_check_kflag_member, |
2813 | .log_details = btf_ref_type_log, |
2814 | .show = btf_modifier_show, |
2815 | }; |
2816 | |
2817 | static const struct btf_kind_operations ptr_ops = { |
2818 | .check_meta = btf_ref_type_check_meta, |
2819 | .resolve = btf_ptr_resolve, |
2820 | .check_member = btf_ptr_check_member, |
2821 | .check_kflag_member = btf_generic_check_kflag_member, |
2822 | .log_details = btf_ref_type_log, |
2823 | .show = btf_ptr_show, |
2824 | }; |
2825 | |
2826 | static s32 btf_fwd_check_meta(struct btf_verifier_env *env, |
2827 | const struct btf_type *t, |
2828 | u32 meta_left) |
2829 | { |
2830 | if (btf_type_vlen(t)) { |
2831 | btf_verifier_log_type(env, t, "vlen != 0"); |
2832 | return -EINVAL; |
2833 | } |
2834 | |
2835 | if (t->type) { |
2836 | btf_verifier_log_type(env, t, "type != 0"); |
2837 | return -EINVAL; |
2838 | } |
2839 | |
2840 | /* fwd type must have a valid name */ |
2841 | if (!t->name_off || |
2842 | !btf_name_valid_identifier(env->btf, t->name_off)) { |
2843 | btf_verifier_log_type(env, t, "Invalid name"); |
2844 | return -EINVAL; |
2845 | } |
2846 | |
2847 | btf_verifier_log_type(env, t, NULL); |
2848 | |
2849 | return 0; |
2850 | } |
2851 | |
2852 | static void btf_fwd_type_log(struct btf_verifier_env *env, |
2853 | const struct btf_type *t) |
2854 | { |
2855 | btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct"); |
2856 | } |
2857 | |
2858 | static const struct btf_kind_operations fwd_ops = { |
2859 | .check_meta = btf_fwd_check_meta, |
2860 | .resolve = btf_df_resolve, |
2861 | .check_member = btf_df_check_member, |
2862 | .check_kflag_member = btf_df_check_kflag_member, |
2863 | .log_details = btf_fwd_type_log, |
2864 | .show = btf_df_show, |
2865 | }; |
2866 | |
2867 | static int btf_array_check_member(struct btf_verifier_env *env, |
2868 | const struct btf_type *struct_type, |
2869 | const struct btf_member *member, |
2870 | const struct btf_type *member_type) |
2871 | { |
2872 | u32 struct_bits_off = member->offset; |
2873 | u32 struct_size, bytes_offset; |
2874 | u32 array_type_id, array_size; |
2875 | struct btf *btf = env->btf; |
2876 | |
2877 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
2878 | btf_verifier_log_member(env, struct_type, member, |
2879 | "Member is not byte aligned"); |
2880 | return -EINVAL; |
2881 | } |
2882 | |
2883 | array_type_id = member->type; |
2884 | btf_type_id_size(btf, &array_type_id, &array_size); |
2885 | struct_size = struct_type->size; |
2886 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
2887 | if (struct_size - bytes_offset < array_size) { |
2888 | btf_verifier_log_member(env, struct_type, member, |
2889 | "Member exceeds struct_size"); |
2890 | return -EINVAL; |
2891 | } |
2892 | |
2893 | return 0; |
2894 | } |
2895 | |
2896 | static s32 btf_array_check_meta(struct btf_verifier_env *env, |
2897 | const struct btf_type *t, |
2898 | u32 meta_left) |
2899 | { |
2900 | const struct btf_array *array = btf_type_array(t); |
2901 | u32 meta_needed = sizeof(*array); |
2902 | |
2903 | if (meta_left < meta_needed) { |
2904 | btf_verifier_log_basic(env, t, |
2905 | "meta_left:%u meta_needed:%u", |
2906 | meta_left, meta_needed); |
2907 | return -EINVAL; |
2908 | } |
2909 | |
2910 | /* array type should not have a name */ |
2911 | if (t->name_off) { |
2912 | btf_verifier_log_type(env, t, "Invalid name"); |
2913 | return -EINVAL; |
2914 | } |
2915 | |
2916 | if (btf_type_vlen(t)) { |
2917 | btf_verifier_log_type(env, t, "vlen != 0"); |
2918 | return -EINVAL; |
2919 | } |
2920 | |
2921 | if (btf_type_kflag(t)) { |
2922 | btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); |
2923 | return -EINVAL; |
2924 | } |
2925 | |
2926 | if (t->size) { |
2927 | btf_verifier_log_type(env, t, "size != 0"); |
2928 | return -EINVAL; |
2929 | } |
2930 | |
2931 | /* Array elem type and index type cannot be in type void, |
2932 | * so !array->type and !array->index_type are not allowed. |
2933 | */ |
2934 | if (!array->type || !BTF_TYPE_ID_VALID(array->type)) { |
2935 | btf_verifier_log_type(env, t, "Invalid elem"); |
2936 | return -EINVAL; |
2937 | } |
2938 | |
2939 | if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) { |
2940 | btf_verifier_log_type(env, t, "Invalid index"); |
2941 | return -EINVAL; |
2942 | } |
2943 | |
2944 | btf_verifier_log_type(env, t, NULL); |
2945 | |
2946 | return meta_needed; |
2947 | } |
2948 | |
2949 | static int btf_array_resolve(struct btf_verifier_env *env, |
2950 | const struct resolve_vertex *v) |
2951 | { |
2952 | const struct btf_array *array = btf_type_array(v->t); |
2953 | const struct btf_type *elem_type, *index_type; |
2954 | u32 elem_type_id, index_type_id; |
2955 | struct btf *btf = env->btf; |
2956 | u32 elem_size; |
2957 | |
2958 | /* Check array->index_type */ |
2959 | index_type_id = array->index_type; |
2960 | index_type = btf_type_by_id(btf, index_type_id); |
2961 | if (btf_type_nosize_or_null(index_type) || |
2962 | btf_type_is_resolve_source_only(index_type)) { |
2963 | btf_verifier_log_type(env, v->t, "Invalid index"); |
2964 | return -EINVAL; |
2965 | } |
2966 |  |
2967 | if (!env_type_is_resolve_sink(env, index_type) && |
2968 | !env_type_is_resolved(env, index_type_id)) |
2969 | return env_stack_push(env, index_type, index_type_id); |
2970 | |
2971 | index_type = btf_type_id_size(btf, &index_type_id, NULL); |
2972 | if (!index_type || !btf_type_is_int(index_type) || |
2973 | !btf_type_int_is_regular(index_type)) { |
2974 | btf_verifier_log_type(env, v->t, "Invalid index"); |
2975 | return -EINVAL; |
2976 | } |
2977 | |
2978 | /* Check array->type */ |
2979 | elem_type_id = array->type; |
2980 | elem_type = btf_type_by_id(btf, elem_type_id); |
2981 | if (btf_type_nosize_or_null(elem_type) || |
2982 | btf_type_is_resolve_source_only(elem_type)) { |
2983 | btf_verifier_log_type(env, v->t, |
2984 | "Invalid elem"); |
2985 | return -EINVAL; |
2986 | } |
2987 |  |
2988 | if (!env_type_is_resolve_sink(env, elem_type) && |
2989 | !env_type_is_resolved(env, elem_type_id)) |
2990 | return env_stack_push(env, elem_type, elem_type_id); |
2991 |  |
2992 | elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size); |
2993 | if (!elem_type) { |
2994 | btf_verifier_log_type(env, v->t, "Invalid elem"); |
2995 | return -EINVAL; |
2996 | } |
2997 | |
2998 | if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) { |
2999 | btf_verifier_log_type(env, v->t, "Invalid array of int"); |
3000 | return -EINVAL; |
3001 | } |
3002 | |
3003 | if (array->nelems && elem_size > U32_MAX / array->nelems) { |
3004 | btf_verifier_log_type(env, v->t, |
3005 | "Array size overflows U32_MAX"); |
3006 | return -EINVAL; |
3007 | } |
3008 | |
3009 | env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems); |
3010 | |
3011 | return 0; |
3012 | } |
3013 | |
3014 | static void btf_array_log(struct btf_verifier_env *env, |
3015 | const struct btf_type *t) |
3016 | { |
3017 | const struct btf_array *array = btf_type_array(t); |
3018 | |
3019 | btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u", |
3020 | array->type, array->index_type, array->nelems); |
3021 | } |
3022 | |
3023 | static void __btf_array_show(const struct btf *btf, const struct btf_type *t, |
3024 | u32 type_id, void *data, u8 bits_offset, |
3025 | struct btf_show *show) |
3026 | { |
3027 | const struct btf_array *array = btf_type_array(t); |
3028 | const struct btf_kind_operations *elem_ops; |
3029 | const struct btf_type *elem_type; |
3030 | u32 i, elem_size = 0, elem_type_id; |
3031 | u16 encoding = 0; |
3032 | |
3033 | elem_type_id = array->type; |
3034 | elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL); |
3035 | if (elem_type && btf_type_has_size(elem_type)) |
3036 | elem_size = elem_type->size; |
3037 |  |
3038 | if (elem_type && btf_type_is_int(elem_type)) { |
3039 | u32 int_type = btf_type_int(elem_type); |
3040 | |
3041 | encoding = BTF_INT_ENCODING(int_type); |
3042 | |
3043 | /* |
3044 | * BTF_INT_CHAR encoding never seems to be set for |
3045 | * char arrays, so if the element size is 1 and the element |
3046 | * is printable as a char, show it as one. |
3047 | */ |
3048 | if (elem_size == 1) |
3049 | encoding = BTF_INT_CHAR; |
3050 | } |
3051 | |
3052 | if (!btf_show_start_array_type(show, t, type_id, encoding, data)) |
3053 | return; |
3054 | |
3055 | if (!elem_type) |
3056 | goto out; |
3057 | elem_ops = btf_type_ops(elem_type); |
3058 | |
3059 | for (i = 0; i < array->nelems; i++) { |
3060 | |
3061 | btf_show_start_array_member(show); |
3062 | |
3063 | elem_ops->show(btf, elem_type, elem_type_id, data, |
3064 | bits_offset, show); |
3065 | data += elem_size; |
3066 | |
3067 | btf_show_end_array_member(show); |
3068 | |
3069 | if (show->state.array_terminated) |
3070 | break; |
3071 | } |
3072 | out: |
3073 | btf_show_end_array_type(show); |
3074 | } |
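|  |
| /* E.g. a "char name[8]" member holding "abc" is emitted element by |
| * element as chars and stops at the NUL terminator rather than being |
| * printed as one quoted string; non-printable bytes fall back to the |
| * numeric branches of btf_int_show(). |
| */ |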
3075 | |
3076 | static void btf_array_show(const struct btf *btf, const struct btf_type *t, |
3077 | u32 type_id, void *data, u8 bits_offset, |
3078 | struct btf_show *show) |
3079 | { |
3080 | const struct btf_member *m = show->state.member; |
3081 | |
3082 | /* |
3083 | * First check if any members would be shown (are non-zero). |
3084 | * See comments above "struct btf_show" definition for more |
3085 | * details on how this works at a high-level. |
3086 | */ |
3087 | if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) { |
3088 | if (!show->state.depth_check) { |
3089 | show->state.depth_check = show->state.depth + 1; |
3090 | show->state.depth_to_show = 0; |
3091 | } |
3092 | __btf_array_show(btf, t, type_id, data, bits_offset, show); |
3093 | show->state.member = m; |
3094 | |
3095 | if (show->state.depth_check != show->state.depth + 1) |
3096 | return; |
3097 | show->state.depth_check = 0; |
3098 | |
3099 | if (show->state.depth_to_show <= show->state.depth) |
3100 | return; |
3101 | /* |
3102 | * Reaching here indicates we have recursed and found |
3103 | * non-zero array member(s). |
3104 | */ |
3105 | } |
3106 | __btf_array_show(btf, t, type_id, data, bits_offset, show); |
3107 | } |
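|  |
| /* The two __btf_array_show() calls above implement a look-ahead for |
| * zero suppression: the first pass only records, via depth_to_show, |
| * whether any element below this depth is non-zero; output is emitted |
| * on the second pass, and only if something would actually be shown. |
| */ |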
3108 | |
3109 | static const struct btf_kind_operations array_ops = { |
3110 | .check_meta = btf_array_check_meta, |
3111 | .resolve = btf_array_resolve, |
3112 | .check_member = btf_array_check_member, |
3113 | .check_kflag_member = btf_generic_check_kflag_member, |
3114 | .log_details = btf_array_log, |
3115 | .show = btf_array_show, |
3116 | }; |
3117 | |
3118 | static int btf_struct_check_member(struct btf_verifier_env *env, |
3119 | const struct btf_type *struct_type, |
3120 | const struct btf_member *member, |
3121 | const struct btf_type *member_type) |
3122 | { |
3123 | u32 struct_bits_off = member->offset; |
3124 | u32 struct_size, bytes_offset; |
3125 | |
3126 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
3127 | btf_verifier_log_member(env, struct_type, member, |
3128 | "Member is not byte aligned"); |
3129 | return -EINVAL; |
3130 | } |
3131 | |
3132 | struct_size = struct_type->size; |
3133 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
3134 | if (struct_size - bytes_offset < member_type->size) { |
3135 | btf_verifier_log_member(env, struct_type, member, |
3136 | "Member exceeds struct_size"); |
3137 | return -EINVAL; |
3138 | } |
3139 | |
3140 | return 0; |
3141 | } |
3142 | |
3143 | static s32 btf_struct_check_meta(struct btf_verifier_env *env, |
3144 | const struct btf_type *t, |
3145 | u32 meta_left) |
3146 | { |
3147 | bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION; |
3148 | const struct btf_member *member; |
3149 | u32 meta_needed, last_offset; |
3150 | struct btf *btf = env->btf; |
3151 | u32 struct_size = t->size; |
3152 | u32 offset; |
3153 | u16 i; |
3154 | |
3155 | meta_needed = btf_type_vlen(t) * sizeof(*member); |
3156 | if (meta_left < meta_needed) { |
3157 | btf_verifier_log_basic(env, t, |
3158 | "meta_left:%u meta_needed:%u", |
3159 | meta_left, meta_needed); |
3160 | return -EINVAL; |
3161 | } |
3162 | |
3163 | /* a struct type has either no name or a valid one */ |
3164 | if (t->name_off && |
3165 | !btf_name_valid_identifier(env->btf, t->name_off)) { |
3166 | btf_verifier_log_type(env, t, "Invalid name"); |
3167 | return -EINVAL; |
3168 | } |
3169 | |
3170 | btf_verifier_log_type(env, t, NULL); |
3171 | |
3172 | last_offset = 0; |
3173 | for_each_member(i, t, member) { |
3174 | if (!btf_name_offset_valid(btf, member->name_off)) { |
3175 | btf_verifier_log_member(env, t, member, |
3176 | "Invalid member name_offset:%u", |
3177 | member->name_off); |
3178 | return -EINVAL; |
3179 | } |
3180 |  |
3181 | /* a struct member has either no name or a valid one */ |
3182 | if (member->name_off && |
3183 | !btf_name_valid_identifier(btf, member->name_off)) { |
3184 | btf_verifier_log_member(env, t, member, "Invalid name"); |
3185 | return -EINVAL; |
3186 | } |
3187 | /* A member cannot be in type void */ |
3188 | if (!member->type || !BTF_TYPE_ID_VALID(member->type)) { |
3189 | btf_verifier_log_member(env, t, member, |
3190 | "Invalid type_id"); |
3191 | return -EINVAL; |
3192 | } |
3193 |  |
3194 | offset = __btf_member_bit_offset(t, member); |
3195 | if (is_union && offset) { |
3196 | btf_verifier_log_member(env, t, member, |
3197 | "Invalid member bits_offset"); |
3198 | return -EINVAL; |
3199 | } |
3200 | |
3201 | /* |
3202 | * ">" instead of ">=" because the last member could be |
3203 | * "char a[0];" |
3204 | */ |
3205 | if (last_offset > offset) { |
3206 | btf_verifier_log_member(env, t, member, |
3207 | "Invalid member bits_offset"); |
3208 | return -EINVAL; |
3209 | } |
3210 |  |
3211 | if (BITS_ROUNDUP_BYTES(offset) > struct_size) { |
3212 | btf_verifier_log_member(env, t, member, |
3213 | "Member bits_offset exceeds its struct size"); |
3214 | return -EINVAL; |
3215 | } |
3216 |  |
3217 | btf_verifier_log_member(env, t, member, NULL); |
3218 | last_offset = offset; |
3219 | } |
3220 | |
3221 | return meta_needed; |
3222 | } |
3223 | |
3224 | static int btf_struct_resolve(struct btf_verifier_env *env, |
3225 | const struct resolve_vertex *v) |
3226 | { |
3227 | const struct btf_member *member; |
3228 | int err; |
3229 | u16 i; |
3230 | |
3231 | /* Before continuing to resolve the next_member, |
3232 | * ensure the last member is indeed resolved to a |
3233 | * type with size info. |
3234 | */ |
3235 | if (v->next_member) { |
3236 | const struct btf_type *last_member_type; |
3237 | const struct btf_member *last_member; |
3238 | u32 last_member_type_id; |
3239 | |
3240 | last_member = btf_type_member(v->t) + v->next_member - 1; |
3241 | last_member_type_id = last_member->type; |
3242 | if (WARN_ON_ONCE(!env_type_is_resolved(env, |
3243 | last_member_type_id))) |
3244 | return -EINVAL; |
3245 | |
3246 | last_member_type = btf_type_by_id(env->btf, |
3247 | last_member_type_id); |
3248 | if (btf_type_kflag(v->t)) |
3249 | err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t, |
3250 | last_member, |
3251 | last_member_type); |
3252 | else |
3253 | err = btf_type_ops(last_member_type)->check_member(env, v->t, |
3254 | last_member, |
3255 | last_member_type); |
3256 | if (err) |
3257 | return err; |
3258 | } |
3259 | |
3260 | for_each_member_from(i, v->next_member, v->t, member) { |
3261 | u32 member_type_id = member->type; |
3262 | const struct btf_type *member_type = btf_type_by_id(env->btf, |
3263 | member_type_id); |
3264 | |
3265 | if (btf_type_nosize_or_null(member_type) || |
3266 | btf_type_is_resolve_source_only(member_type)) { |
3267 | btf_verifier_log_member(env, v->t, member, |
3268 | "Invalid member"); |
3269 | return -EINVAL; |
3270 | } |
3271 | |
3272 | if (!env_type_is_resolve_sink(env, member_type) && |
3273 | !env_type_is_resolved(env, member_type_id)) { |
3274 | env_stack_set_next_member(env, i + 1); |
3275 | return env_stack_push(env, member_type, member_type_id); |
3276 | } |
3277 | |
3278 | if (btf_type_kflag(v->t)) |
3279 | err = btf_type_ops(member_type)->check_kflag_member(env, v->t, |
3280 | member, |
3281 | member_type); |
3282 | else |
3283 | err = btf_type_ops(member_type)->check_member(env, v->t, |
3284 | member, |
3285 | member_type); |
3286 | if (err) |
3287 | return err; |
3288 | } |
3289 | |
3290 | env_stack_pop_resolved(env, 0, 0); |
3291 | |
3292 | return 0; |
3293 | } |
3294 | |
3295 | static void btf_struct_log(struct btf_verifier_env *env, |
3296 | const struct btf_type *t) |
3297 | { |
3298 | btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); |
3299 | } |
3300 | |
3301 | enum { |
3302 | BTF_FIELD_IGNORE = 0, |
3303 | BTF_FIELD_FOUND = 1, |
3304 | }; |
3305 | |
3306 | struct btf_field_info { |
3307 | enum btf_field_type type; |
3308 | u32 off; |
3309 | union { |
3310 | struct { |
3311 | u32 type_id; |
3312 | } kptr; |
3313 | struct { |
3314 | const char *node_name; |
3315 | u32 value_btf_id; |
3316 | } graph_root; |
3317 | }; |
3318 | }; |
3319 | |
3320 | static int btf_find_struct(const struct btf *btf, const struct btf_type *t, |
3321 | u32 off, int sz, enum btf_field_type field_type, |
3322 | struct btf_field_info *info) |
3323 | { |
3324 | if (!__btf_type_is_struct(t)) |
3325 | return BTF_FIELD_IGNORE; |
3326 | if (t->size != sz) |
3327 | return BTF_FIELD_IGNORE; |
3328 | info->type = field_type; |
3329 | info->off = off; |
3330 | return BTF_FIELD_FOUND; |
3331 | } |
3332 | |
3333 | static int btf_find_kptr(const struct btf *btf, const struct btf_type *t, |
3334 | u32 off, int sz, struct btf_field_info *info, u32 field_mask) |
3335 | { |
3336 | enum btf_field_type type; |
3337 | const char *tag_value; |
3338 | bool is_type_tag; |
3339 | u32 res_id; |
3340 | |
3341 | /* Permit modifiers on the pointer itself */ |
3342 | if (btf_type_is_volatile(t)) |
3343 | t = btf_type_by_id(btf, t->type); |
3344 | /* For PTR, sz is always == 8 */ |
3345 | if (!btf_type_is_ptr(t)) |
3346 | return BTF_FIELD_IGNORE; |
3347 | t = btf_type_by_id(btf, t->type); |
3348 | is_type_tag = btf_type_is_type_tag(t) && !btf_type_kflag(t); |
3349 | if (!is_type_tag) |
3350 | return BTF_FIELD_IGNORE; |
3351 | /* Reject extra tags */ |
	if (btf_type_is_type_tag(btf_type_by_id(btf, t->type)))
		return -EINVAL;
	tag_value = __btf_name_by_offset(btf, t->name_off);
3355 | if (!strcmp("kptr_untrusted", tag_value)) |
3356 | type = BPF_KPTR_UNREF; |
3357 | else if (!strcmp("kptr", tag_value)) |
3358 | type = BPF_KPTR_REF; |
3359 | else if (!strcmp("percpu_kptr", tag_value)) |
3360 | type = BPF_KPTR_PERCPU; |
3361 | else if (!strcmp("uptr", tag_value)) |
3362 | type = BPF_UPTR; |
3363 | else |
3364 | return -EINVAL; |
3365 | |
3366 | if (!(type & field_mask)) |
3367 | return BTF_FIELD_IGNORE; |
3368 | |
3369 | /* Get the base type */ |
	t = btf_type_skip_modifiers(btf, t->type, &res_id);
3371 | /* Only pointer to struct is allowed */ |
3372 | if (!__btf_type_is_struct(t)) |
3373 | return -EINVAL; |
3374 | |
3375 | info->type = type; |
3376 | info->off = off; |
3377 | info->kptr.type_id = res_id; |
3378 | return BTF_FIELD_FOUND; |
3379 | } |
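
/* Example (illustrative, not taken from this file): btf_find_kptr() matches
 * a pointer member whose pointee is wrapped in a BTF type tag. In BPF
 * program source this is typically spelled with the __kptr family of macros
 * (e.g. from libbpf's bpf_helpers.h), which expand to btf_type_tag
 * attributes:
 *
 *	struct map_value {
 *		struct task_struct __attribute__((btf_type_tag("kptr"))) *t;
 *	};
 *
 * i.e. the member resolves as PTR -> TYPE_TAG("kptr") -> STRUCT, which the
 * code above classifies as BPF_KPTR_REF with kptr.type_id = res_id.
 */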
3380 | |
3381 | int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt, |
3382 | int comp_idx, const char *tag_key, int last_id) |
3383 | { |
3384 | int len = strlen(tag_key); |
3385 | int i, n; |
3386 | |
3387 | for (i = last_id + 1, n = btf_nr_types(btf); i < n; i++) { |
3388 | const struct btf_type *t = btf_type_by_id(btf, i); |
3389 | |
3390 | if (!btf_type_is_decl_tag(t)) |
3391 | continue; |
3392 | if (pt != btf_type_by_id(btf, t->type)) |
3393 | continue; |
3394 | if (btf_type_decl_tag(t)->component_idx != comp_idx) |
3395 | continue; |
		if (strncmp(__btf_name_by_offset(btf, t->name_off), tag_key, len))
3397 | continue; |
3398 | return i; |
3399 | } |
3400 | return -ENOENT; |
3401 | } |
3402 | |
3403 | const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt, |
3404 | int comp_idx, const char *tag_key) |
3405 | { |
3406 | const char *value = NULL; |
3407 | const struct btf_type *t; |
3408 | int len, id; |
3409 | |
	id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key, 0);
	if (id < 0)
		return ERR_PTR(id);

	t = btf_type_by_id(btf, id);
	len = strlen(tag_key);
	value = __btf_name_by_offset(btf, t->name_off) + len;

	/* Prevent duplicate entries for same type */
	id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key, id);
	if (id >= 0)
		return ERR_PTR(-EEXIST);
3422 | |
3423 | return value; |
3424 | } |
3425 | |
3426 | static int |
3427 | btf_find_graph_root(const struct btf *btf, const struct btf_type *pt, |
3428 | const struct btf_type *t, int comp_idx, u32 off, |
3429 | int sz, struct btf_field_info *info, |
3430 | enum btf_field_type head_type) |
3431 | { |
3432 | const char *node_field_name; |
3433 | const char *value_type; |
3434 | s32 id; |
3435 | |
3436 | if (!__btf_type_is_struct(t)) |
3437 | return BTF_FIELD_IGNORE; |
3438 | if (t->size != sz) |
3439 | return BTF_FIELD_IGNORE; |
	value_type = btf_find_decl_tag_value(btf, pt, comp_idx, "contains:");
	if (IS_ERR(value_type))
		return -EINVAL;
	node_field_name = strstr(value_type, ":");
	if (!node_field_name)
		return -EINVAL;
	value_type = kstrndup(value_type, node_field_name - value_type,
			      GFP_KERNEL | __GFP_NOWARN);
	if (!value_type)
		return -ENOMEM;
	id = btf_find_by_name_kind(btf, value_type, BTF_KIND_STRUCT);
	kfree(value_type);
	if (id < 0)
		return id;
	node_field_name++;
	if (str_is_empty(node_field_name))
		return -EINVAL;
3455 | return -EINVAL; |
3456 | info->type = head_type; |
3457 | info->off = off; |
3458 | info->graph_root.value_btf_id = id; |
3459 | info->graph_root.node_name = node_field_name; |
3460 | return BTF_FIELD_FOUND; |
3461 | } |
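
/* Example (illustrative): the "contains:" decl tag parsed above typically
 * comes from the __contains() macro in the BPF selftests'
 * bpf_experimental.h, which expands to
 * btf_decl_tag("contains:<value_type>:<node_field>"):
 *
 *	struct elem {
 *		long key;
 *		struct bpf_rb_node node;
 *	};
 *	struct map_value {
 *		struct bpf_rb_root root __contains(elem, node);
 *	};
 *
 * Here value_btf_id resolves to 'struct elem' and node_name to "node".
 */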
3462 | |
3463 | #define field_mask_test_name(field_type, field_type_str) \ |
3464 | if (field_mask & field_type && !strcmp(name, field_type_str)) { \ |
3465 | type = field_type; \ |
3466 | goto end; \ |
3467 | } |
3468 | |
3469 | static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_type, |
3470 | u32 field_mask, u32 *seen_mask, |
3471 | int *align, int *sz) |
3472 | { |
3473 | int type = 0; |
	const char *name = __btf_name_by_offset(btf, var_type->name_off);
3475 | |
3476 | if (field_mask & BPF_SPIN_LOCK) { |
3477 | if (!strcmp(name, "bpf_spin_lock")) { |
3478 | if (*seen_mask & BPF_SPIN_LOCK) |
3479 | return -E2BIG; |
3480 | *seen_mask |= BPF_SPIN_LOCK; |
3481 | type = BPF_SPIN_LOCK; |
3482 | goto end; |
3483 | } |
3484 | } |
3485 | if (field_mask & BPF_RES_SPIN_LOCK) { |
3486 | if (!strcmp(name, "bpf_res_spin_lock")) { |
3487 | if (*seen_mask & BPF_RES_SPIN_LOCK) |
3488 | return -E2BIG; |
3489 | *seen_mask |= BPF_RES_SPIN_LOCK; |
3490 | type = BPF_RES_SPIN_LOCK; |
3491 | goto end; |
3492 | } |
3493 | } |
3494 | if (field_mask & BPF_TIMER) { |
3495 | if (!strcmp(name, "bpf_timer")) { |
3496 | if (*seen_mask & BPF_TIMER) |
3497 | return -E2BIG; |
3498 | *seen_mask |= BPF_TIMER; |
3499 | type = BPF_TIMER; |
3500 | goto end; |
3501 | } |
3502 | } |
3503 | if (field_mask & BPF_WORKQUEUE) { |
3504 | if (!strcmp(name, "bpf_wq")) { |
3505 | if (*seen_mask & BPF_WORKQUEUE) |
3506 | return -E2BIG; |
3507 | *seen_mask |= BPF_WORKQUEUE; |
3508 | type = BPF_WORKQUEUE; |
3509 | goto end; |
3510 | } |
3511 | } |
3512 | field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head"); |
3513 | field_mask_test_name(BPF_LIST_NODE, "bpf_list_node"); |
3514 | field_mask_test_name(BPF_RB_ROOT, "bpf_rb_root"); |
3515 | field_mask_test_name(BPF_RB_NODE, "bpf_rb_node"); |
3516 | field_mask_test_name(BPF_REFCOUNT, "bpf_refcount"); |
3517 | |
3518 | /* Only return BPF_KPTR when all other types with matchable names fail */ |
	if (field_mask & (BPF_KPTR | BPF_UPTR) && !__btf_type_is_struct(var_type)) {
3520 | type = BPF_KPTR_REF; |
3521 | goto end; |
3522 | } |
3523 | return 0; |
3524 | end: |
3525 | *sz = btf_field_type_size(type); |
3526 | *align = btf_field_type_align(type); |
3527 | return type; |
3528 | } |
3529 | |
3530 | #undef field_mask_test_name |
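
/* Example (illustrative): a member declared as 'struct bpf_timer t;'
 * matches the "bpf_timer" name check above and yields BPF_TIMER, with
 * *sz and *align filled from btf_field_type_size()/btf_field_type_align().
 * A non-struct member (e.g. a tagged pointer) falls through to the
 * BPF_KPTR_REF fallback, to be classified precisely by btf_find_kptr().
 */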
3531 | |
/* Repeat a number of fields a specified number of times.
 *
 * Copy the fields starting from the first field and repeat them
 * repeat_cnt times. The fields are repeated by adding the offset of each
 * field with
 *   (i + 1) * elem_size
 * where i is the repeat index and elem_size is the size of an element.
3539 | */ |
3540 | static int btf_repeat_fields(struct btf_field_info *info, int info_cnt, |
3541 | u32 field_cnt, u32 repeat_cnt, u32 elem_size) |
3542 | { |
3543 | u32 i, j; |
3544 | u32 cur; |
3545 | |
3546 | /* Ensure not repeating fields that should not be repeated. */ |
3547 | for (i = 0; i < field_cnt; i++) { |
3548 | switch (info[i].type) { |
3549 | case BPF_KPTR_UNREF: |
3550 | case BPF_KPTR_REF: |
3551 | case BPF_KPTR_PERCPU: |
3552 | case BPF_UPTR: |
3553 | case BPF_LIST_HEAD: |
3554 | case BPF_RB_ROOT: |
3555 | break; |
3556 | default: |
3557 | return -EINVAL; |
3558 | } |
3559 | } |
3560 | |
	/* Struct size and variable size are both u32, so the
	 * multiplication cannot overflow.
	 */
3564 | if (field_cnt * (repeat_cnt + 1) > info_cnt) |
3565 | return -E2BIG; |
3566 | |
3567 | cur = field_cnt; |
3568 | for (i = 0; i < repeat_cnt; i++) { |
3569 | memcpy(&info[cur], &info[0], field_cnt * sizeof(info[0])); |
3570 | for (j = 0; j < field_cnt; j++) |
3571 | info[cur++].off += (i + 1) * elem_size; |
3572 | } |
3573 | |
3574 | return 0; |
3575 | } |
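
/* Example (illustrative): for a map value containing
 *
 *	struct { struct foo __kptr *p; } arr[3];
 *
 * the kptr is found once at offset 'off' within arr[0], then
 * btf_repeat_fields(info, info_cnt, 1, 2, sizeof(arr[0])) appends copies
 * at off + sizeof(arr[0]) and off + 2 * sizeof(arr[0]).
 */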
3576 | |
3577 | static int btf_find_struct_field(const struct btf *btf, |
3578 | const struct btf_type *t, u32 field_mask, |
3579 | struct btf_field_info *info, int info_cnt, |
3580 | u32 level); |
3581 | |
3582 | /* Find special fields in the struct type of a field. |
3583 | * |
 * This function is used to find fields of special types that are not
 * global variables or direct fields of a struct type. It also handles
 * the repetition when such a struct is the element type of an array.
3587 | */ |
3588 | static int btf_find_nested_struct(const struct btf *btf, const struct btf_type *t, |
3589 | u32 off, u32 nelems, |
3590 | u32 field_mask, struct btf_field_info *info, |
3591 | int info_cnt, u32 level) |
3592 | { |
3593 | int ret, err, i; |
3594 | |
3595 | level++; |
3596 | if (level >= MAX_RESOLVE_DEPTH) |
3597 | return -E2BIG; |
3598 | |
3599 | ret = btf_find_struct_field(btf, t, field_mask, info, info_cnt, level); |
3600 | |
3601 | if (ret <= 0) |
3602 | return ret; |
3603 | |
	/* Shift the offsets of the nested struct fields to the offsets
	 * relative to the container.
	 */
	for (i = 0; i < ret; i++)
		info[i].off += off;

	if (nelems > 1) {
		err = btf_repeat_fields(info, info_cnt, ret, nelems - 1, t->size);
3612 | if (err == 0) |
3613 | ret *= nelems; |
3614 | else |
3615 | ret = err; |
3616 | } |
3617 | |
3618 | return ret; |
3619 | } |
3620 | |
3621 | static int btf_find_field_one(const struct btf *btf, |
3622 | const struct btf_type *var, |
3623 | const struct btf_type *var_type, |
3624 | int var_idx, |
3625 | u32 off, u32 expected_size, |
3626 | u32 field_mask, u32 *seen_mask, |
3627 | struct btf_field_info *info, int info_cnt, |
3628 | u32 level) |
3629 | { |
3630 | int ret, align, sz, field_type; |
3631 | struct btf_field_info tmp; |
3632 | const struct btf_array *array; |
3633 | u32 i, nelems = 1; |
3634 | |
3635 | /* Walk into array types to find the element type and the number of |
3636 | * elements in the (flattened) array. |
3637 | */ |
	for (i = 0; i < MAX_RESOLVE_DEPTH && btf_type_is_array(var_type); i++) {
		array = btf_array(var_type);
		nelems *= array->nelems;
		var_type = btf_type_by_id(btf, array->type);
3642 | } |
3643 | if (i == MAX_RESOLVE_DEPTH) |
3644 | return -E2BIG; |
3645 | if (nelems == 0) |
3646 | return 0; |
3647 | |
	field_type = btf_get_field_type(btf, var_type,
					field_mask, seen_mask, &align, &sz);
	/* Look into variables of struct types */
	if (!field_type && __btf_type_is_struct(var_type)) {
		sz = var_type->size;
		if (expected_size && expected_size != sz * nelems)
			return 0;
		ret = btf_find_nested_struct(btf, var_type, off, nelems, field_mask,
					     &info[0], info_cnt, level);
3657 | return ret; |
3658 | } |
3659 | |
3660 | if (field_type == 0) |
3661 | return 0; |
3662 | if (field_type < 0) |
3663 | return field_type; |
3664 | |
3665 | if (expected_size && expected_size != sz * nelems) |
3666 | return 0; |
3667 | if (off % align) |
3668 | return 0; |
3669 | |
3670 | switch (field_type) { |
3671 | case BPF_SPIN_LOCK: |
3672 | case BPF_RES_SPIN_LOCK: |
3673 | case BPF_TIMER: |
3674 | case BPF_WORKQUEUE: |
3675 | case BPF_LIST_NODE: |
3676 | case BPF_RB_NODE: |
3677 | case BPF_REFCOUNT: |
		ret = btf_find_struct(btf, var_type, off, sz, field_type,
				      info_cnt ? &info[0] : &tmp);
		if (ret < 0)
			return ret;
		break;
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
	case BPF_KPTR_PERCPU:
	case BPF_UPTR:
		ret = btf_find_kptr(btf, var_type, off, sz,
				    info_cnt ? &info[0] : &tmp, field_mask);
		if (ret < 0)
			return ret;
		break;
	case BPF_LIST_HEAD:
	case BPF_RB_ROOT:
		ret = btf_find_graph_root(btf, var, var_type,
					  var_idx, off, sz,
					  info_cnt ? &info[0] : &tmp,
					  field_type);
3698 | if (ret < 0) |
3699 | return ret; |
3700 | break; |
3701 | default: |
3702 | return -EFAULT; |
3703 | } |
3704 | |
3705 | if (ret == BTF_FIELD_IGNORE) |
3706 | return 0; |
3707 | if (!info_cnt) |
3708 | return -E2BIG; |
3709 | if (nelems > 1) { |
		ret = btf_repeat_fields(info, info_cnt, 1, nelems - 1, sz);
3711 | if (ret < 0) |
3712 | return ret; |
3713 | } |
3714 | return nelems; |
3715 | } |
3716 | |
3717 | static int btf_find_struct_field(const struct btf *btf, |
3718 | const struct btf_type *t, u32 field_mask, |
3719 | struct btf_field_info *info, int info_cnt, |
3720 | u32 level) |
3721 | { |
3722 | int ret, idx = 0; |
3723 | const struct btf_member *member; |
3724 | u32 i, off, seen_mask = 0; |
3725 | |
3726 | for_each_member(i, t, member) { |
3727 | const struct btf_type *member_type = btf_type_by_id(btf, |
3728 | member->type); |
3729 | |
		off = __btf_member_bit_offset(t, member);
3731 | if (off % 8) |
3732 | /* valid C code cannot generate such BTF */ |
3733 | return -EINVAL; |
3734 | off /= 8; |
3735 | |
		ret = btf_find_field_one(btf, t, member_type, i,
					 off, 0,
					 field_mask, &seen_mask,
					 &info[idx], info_cnt - idx, level);
3740 | if (ret < 0) |
3741 | return ret; |
3742 | idx += ret; |
3743 | } |
3744 | return idx; |
3745 | } |
3746 | |
3747 | static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t, |
3748 | u32 field_mask, struct btf_field_info *info, |
3749 | int info_cnt, u32 level) |
3750 | { |
3751 | int ret, idx = 0; |
3752 | const struct btf_var_secinfo *vsi; |
3753 | u32 i, off, seen_mask = 0; |
3754 | |
3755 | for_each_vsi(i, t, vsi) { |
3756 | const struct btf_type *var = btf_type_by_id(btf, vsi->type); |
3757 | const struct btf_type *var_type = btf_type_by_id(btf, var->type); |
3758 | |
3759 | off = vsi->offset; |
		ret = btf_find_field_one(btf, var, var_type, -1, off, vsi->size,
					 field_mask, &seen_mask,
					 &info[idx], info_cnt - idx,
					 level);
3764 | if (ret < 0) |
3765 | return ret; |
3766 | idx += ret; |
3767 | } |
3768 | return idx; |
3769 | } |
3770 | |
3771 | static int btf_find_field(const struct btf *btf, const struct btf_type *t, |
3772 | u32 field_mask, struct btf_field_info *info, |
3773 | int info_cnt) |
3774 | { |
3775 | if (__btf_type_is_struct(t)) |
		return btf_find_struct_field(btf, t, field_mask, info, info_cnt, 0);
	else if (btf_type_is_datasec(t))
		return btf_find_datasec_var(btf, t, field_mask, info, info_cnt, 0);
3779 | return -EINVAL; |
3780 | } |
3781 | |
3782 | /* Callers have to ensure the life cycle of btf if it is program BTF */ |
3783 | static int btf_parse_kptr(const struct btf *btf, struct btf_field *field, |
3784 | struct btf_field_info *info) |
3785 | { |
3786 | struct module *mod = NULL; |
3787 | const struct btf_type *t; |
3788 | /* If a matching btf type is found in kernel or module BTFs, kptr_ref |
3789 | * is that BTF, otherwise it's program BTF |
3790 | */ |
3791 | struct btf *kptr_btf; |
3792 | int ret; |
3793 | s32 id; |
3794 | |
3795 | /* Find type in map BTF, and use it to look up the matching type |
3796 | * in vmlinux or module BTFs, by name and kind. |
3797 | */ |
3798 | t = btf_type_by_id(btf, info->kptr.type_id); |
	id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info),
			     &kptr_btf);
3801 | if (id == -ENOENT) { |
3802 | /* btf_parse_kptr should only be called w/ btf = program BTF */ |
3803 | WARN_ON_ONCE(btf_is_kernel(btf)); |
3804 | |
3805 | /* Type exists only in program BTF. Assume that it's a MEM_ALLOC |
3806 | * kptr allocated via bpf_obj_new |
3807 | */ |
3808 | field->kptr.dtor = NULL; |
3809 | id = info->kptr.type_id; |
3810 | kptr_btf = (struct btf *)btf; |
3811 | goto found_dtor; |
3812 | } |
3813 | if (id < 0) |
3814 | return id; |
3815 | |
3816 | /* Find and stash the function pointer for the destruction function that |
3817 | * needs to be eventually invoked from the map free path. |
3818 | */ |
3819 | if (info->type == BPF_KPTR_REF) { |
3820 | const struct btf_type *dtor_func; |
3821 | const char *dtor_func_name; |
3822 | unsigned long addr; |
3823 | s32 dtor_btf_id; |
3824 | |
3825 | /* This call also serves as a whitelist of allowed objects that |
3826 | * can be used as a referenced pointer and be stored in a map at |
3827 | * the same time. |
3828 | */ |
		dtor_btf_id = btf_find_dtor_kfunc(kptr_btf, id);
3830 | if (dtor_btf_id < 0) { |
3831 | ret = dtor_btf_id; |
3832 | goto end_btf; |
3833 | } |
3834 | |
3835 | dtor_func = btf_type_by_id(kptr_btf, dtor_btf_id); |
3836 | if (!dtor_func) { |
3837 | ret = -ENOENT; |
3838 | goto end_btf; |
3839 | } |
3840 | |
		if (btf_is_module(kptr_btf)) {
			mod = btf_try_get_module(kptr_btf);
3843 | if (!mod) { |
3844 | ret = -ENXIO; |
3845 | goto end_btf; |
3846 | } |
3847 | } |
3848 | |
3849 | /* We already verified dtor_func to be btf_type_is_func |
3850 | * in register_btf_id_dtor_kfuncs. |
3851 | */ |
		dtor_func_name = __btf_name_by_offset(kptr_btf, dtor_func->name_off);
		addr = kallsyms_lookup_name(dtor_func_name);
3854 | if (!addr) { |
3855 | ret = -EINVAL; |
3856 | goto end_mod; |
3857 | } |
3858 | field->kptr.dtor = (void *)addr; |
3859 | } |
3860 | |
3861 | found_dtor: |
3862 | field->kptr.btf_id = id; |
3863 | field->kptr.btf = kptr_btf; |
3864 | field->kptr.module = mod; |
3865 | return 0; |
end_mod:
	module_put(mod);
end_btf:
	btf_put(kptr_btf);
3870 | return ret; |
3871 | } |
3872 | |
3873 | static int btf_parse_graph_root(const struct btf *btf, |
3874 | struct btf_field *field, |
3875 | struct btf_field_info *info, |
3876 | const char *node_type_name, |
3877 | size_t node_type_align) |
3878 | { |
3879 | const struct btf_type *t, *n = NULL; |
3880 | const struct btf_member *member; |
3881 | u32 offset; |
3882 | int i; |
3883 | |
3884 | t = btf_type_by_id(btf, info->graph_root.value_btf_id); |
3885 | /* We've already checked that value_btf_id is a struct type. We |
3886 | * just need to figure out the offset of the list_node, and |
3887 | * verify its type. |
3888 | */ |
3889 | for_each_member(i, t, member) { |
		if (strcmp(info->graph_root.node_name,
			   __btf_name_by_offset(btf, member->name_off)))
3892 | continue; |
3893 | /* Invalid BTF, two members with same name */ |
3894 | if (n) |
3895 | return -EINVAL; |
3896 | n = btf_type_by_id(btf, member->type); |
		if (!__btf_type_is_struct(n))
			return -EINVAL;
		if (strcmp(node_type_name, __btf_name_by_offset(btf, n->name_off)))
			return -EINVAL;
		offset = __btf_member_bit_offset(n, member);
3902 | if (offset % 8) |
3903 | return -EINVAL; |
3904 | offset /= 8; |
3905 | if (offset % node_type_align) |
3906 | return -EINVAL; |
3907 | |
3908 | field->graph_root.btf = (struct btf *)btf; |
3909 | field->graph_root.value_btf_id = info->graph_root.value_btf_id; |
3910 | field->graph_root.node_offset = offset; |
3911 | } |
3912 | if (!n) |
3913 | return -ENOENT; |
3914 | return 0; |
3915 | } |
3916 | |
3917 | static int btf_parse_list_head(const struct btf *btf, struct btf_field *field, |
3918 | struct btf_field_info *info) |
3919 | { |
	return btf_parse_graph_root(btf, field, info, "bpf_list_node",
				    __alignof__(struct bpf_list_node));
3922 | } |
3923 | |
3924 | static int btf_parse_rb_root(const struct btf *btf, struct btf_field *field, |
3925 | struct btf_field_info *info) |
3926 | { |
	return btf_parse_graph_root(btf, field, info, "bpf_rb_node",
				    __alignof__(struct bpf_rb_node));
3929 | } |
3930 | |
3931 | static int btf_field_cmp(const void *_a, const void *_b, const void *priv) |
3932 | { |
3933 | const struct btf_field *a = (const struct btf_field *)_a; |
3934 | const struct btf_field *b = (const struct btf_field *)_b; |
3935 | |
3936 | if (a->offset < b->offset) |
3937 | return -1; |
3938 | else if (a->offset > b->offset) |
3939 | return 1; |
3940 | return 0; |
3941 | } |
3942 | |
3943 | struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t, |
3944 | u32 field_mask, u32 value_size) |
3945 | { |
3946 | struct btf_field_info info_arr[BTF_FIELDS_MAX]; |
3947 | u32 next_off = 0, field_type_size; |
3948 | struct btf_record *rec; |
3949 | int ret, i, cnt; |
3950 | |
	ret = btf_find_field(btf, t, field_mask, info_arr, ARRAY_SIZE(info_arr));
	if (ret < 0)
		return ERR_PTR(ret);
3954 | if (!ret) |
3955 | return NULL; |
3956 | |
3957 | cnt = ret; |
3958 | /* This needs to be kzalloc to zero out padding and unused fields, see |
3959 | * comment in btf_record_equal. |
3960 | */ |
3961 | rec = kzalloc(struct_size(rec, fields, cnt), GFP_KERNEL | __GFP_NOWARN); |
3962 | if (!rec) |
		return ERR_PTR(-ENOMEM);
3964 | |
3965 | rec->spin_lock_off = -EINVAL; |
3966 | rec->res_spin_lock_off = -EINVAL; |
3967 | rec->timer_off = -EINVAL; |
3968 | rec->wq_off = -EINVAL; |
3969 | rec->refcount_off = -EINVAL; |
3970 | for (i = 0; i < cnt; i++) { |
		field_type_size = btf_field_type_size(info_arr[i].type);
3972 | if (info_arr[i].off + field_type_size > value_size) { |
3973 | WARN_ONCE(1, "verifier bug off %d size %d", info_arr[i].off, value_size); |
3974 | ret = -EFAULT; |
3975 | goto end; |
3976 | } |
3977 | if (info_arr[i].off < next_off) { |
3978 | ret = -EEXIST; |
3979 | goto end; |
3980 | } |
3981 | next_off = info_arr[i].off + field_type_size; |
3982 | |
3983 | rec->field_mask |= info_arr[i].type; |
3984 | rec->fields[i].offset = info_arr[i].off; |
3985 | rec->fields[i].type = info_arr[i].type; |
3986 | rec->fields[i].size = field_type_size; |
3987 | |
3988 | switch (info_arr[i].type) { |
3989 | case BPF_SPIN_LOCK: |
3990 | WARN_ON_ONCE(rec->spin_lock_off >= 0); |
3991 | /* Cache offset for faster lookup at runtime */ |
3992 | rec->spin_lock_off = rec->fields[i].offset; |
3993 | break; |
3994 | case BPF_RES_SPIN_LOCK: |
3995 | WARN_ON_ONCE(rec->spin_lock_off >= 0); |
3996 | /* Cache offset for faster lookup at runtime */ |
3997 | rec->res_spin_lock_off = rec->fields[i].offset; |
3998 | break; |
3999 | case BPF_TIMER: |
4000 | WARN_ON_ONCE(rec->timer_off >= 0); |
4001 | /* Cache offset for faster lookup at runtime */ |
4002 | rec->timer_off = rec->fields[i].offset; |
4003 | break; |
4004 | case BPF_WORKQUEUE: |
4005 | WARN_ON_ONCE(rec->wq_off >= 0); |
4006 | /* Cache offset for faster lookup at runtime */ |
4007 | rec->wq_off = rec->fields[i].offset; |
4008 | break; |
4009 | case BPF_REFCOUNT: |
4010 | WARN_ON_ONCE(rec->refcount_off >= 0); |
4011 | /* Cache offset for faster lookup at runtime */ |
4012 | rec->refcount_off = rec->fields[i].offset; |
4013 | break; |
4014 | case BPF_KPTR_UNREF: |
4015 | case BPF_KPTR_REF: |
4016 | case BPF_KPTR_PERCPU: |
4017 | case BPF_UPTR: |
			ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]);
4019 | if (ret < 0) |
4020 | goto end; |
4021 | break; |
4022 | case BPF_LIST_HEAD: |
			ret = btf_parse_list_head(btf, &rec->fields[i], &info_arr[i]);
4024 | if (ret < 0) |
4025 | goto end; |
4026 | break; |
4027 | case BPF_RB_ROOT: |
			ret = btf_parse_rb_root(btf, &rec->fields[i], &info_arr[i]);
4029 | if (ret < 0) |
4030 | goto end; |
4031 | break; |
4032 | case BPF_LIST_NODE: |
4033 | case BPF_RB_NODE: |
4034 | break; |
4035 | default: |
4036 | ret = -EFAULT; |
4037 | goto end; |
4038 | } |
4039 | rec->cnt++; |
4040 | } |
4041 | |
4042 | if (rec->spin_lock_off >= 0 && rec->res_spin_lock_off >= 0) { |
4043 | ret = -EINVAL; |
4044 | goto end; |
4045 | } |
4046 | |
4047 | /* bpf_{list_head, rb_node} require bpf_spin_lock */ |
	if ((btf_record_has_field(rec, BPF_LIST_HEAD) ||
	     btf_record_has_field(rec, BPF_RB_ROOT)) &&
	    (rec->spin_lock_off < 0 && rec->res_spin_lock_off < 0)) {
4051 | ret = -EINVAL; |
4052 | goto end; |
4053 | } |
4054 | |
	if (rec->refcount_off < 0 &&
	    btf_record_has_field(rec, BPF_LIST_NODE) &&
	    btf_record_has_field(rec, BPF_RB_NODE)) {
4058 | ret = -EINVAL; |
4059 | goto end; |
4060 | } |
4061 | |
	sort_r(rec->fields, rec->cnt, sizeof(struct btf_field), btf_field_cmp,
	       NULL, rec);
4064 | |
4065 | return rec; |
4066 | end: |
4067 | btf_record_free(rec); |
	return ERR_PTR(ret);
4069 | } |
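
/* Example (illustrative): for a map value such as
 *
 *	struct map_value {
 *		struct bpf_spin_lock lock;
 *		struct bpf_timer timer;
 *		struct task_struct __kptr *task;
 *	};
 *
 * btf_parse_fields() returns a btf_record with cnt == 3, fields sorted by
 * offset, spin_lock_off and timer_off cached for fast runtime lookup, and
 * the kptr's target type resolved via btf_parse_kptr().
 */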
4070 | |
4071 | int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec) |
4072 | { |
4073 | int i; |
4074 | |
4075 | /* There are three types that signify ownership of some other type: |
4076 | * kptr_ref, bpf_list_head, bpf_rb_root. |
4077 | * kptr_ref only supports storing kernel types, which can't store |
4078 | * references to program allocated local types. |
4079 | * |
4080 | * Hence we only need to ensure that bpf_{list_head,rb_root} ownership |
4081 | * does not form cycles. |
4082 | */ |
	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & (BPF_GRAPH_ROOT | BPF_UPTR)))
4084 | return 0; |
4085 | for (i = 0; i < rec->cnt; i++) { |
4086 | struct btf_struct_meta *meta; |
4087 | const struct btf_type *t; |
4088 | u32 btf_id; |
4089 | |
4090 | if (rec->fields[i].type == BPF_UPTR) { |
4091 | /* The uptr only supports pinning one page and cannot |
4092 | * point to a kernel struct |
4093 | */ |
			if (btf_is_kernel(rec->fields[i].kptr.btf))
4095 | return -EINVAL; |
4096 | t = btf_type_by_id(rec->fields[i].kptr.btf, |
4097 | rec->fields[i].kptr.btf_id); |
4098 | if (!t->size) |
4099 | return -EINVAL; |
4100 | if (t->size > PAGE_SIZE) |
4101 | return -E2BIG; |
4102 | continue; |
4103 | } |
4104 | |
4105 | if (!(rec->fields[i].type & BPF_GRAPH_ROOT)) |
4106 | continue; |
4107 | btf_id = rec->fields[i].graph_root.value_btf_id; |
4108 | meta = btf_find_struct_meta(btf, btf_id); |
4109 | if (!meta) |
4110 | return -EFAULT; |
4111 | rec->fields[i].graph_root.value_rec = meta->record; |
4112 | |
4113 | /* We need to set value_rec for all root types, but no need |
4114 | * to check ownership cycle for a type unless it's also a |
4115 | * node type. |
4116 | */ |
4117 | if (!(rec->field_mask & BPF_GRAPH_NODE)) |
4118 | continue; |
4119 | |
4120 | /* We need to ensure ownership acyclicity among all types. The |
4121 | * proper way to do it would be to topologically sort all BTF |
4122 | * IDs based on the ownership edges, since there can be multiple |
	 * bpf_{list_head,rb_node} in a type. Instead, we use the
	 * following reasoning:
	 *
	 * - A type can only be owned by another type in user BTF if it
	 *   has a bpf_{list,rb}_node. Let's call these node types.
	 * - A type can only _own_ another type in user BTF if it has a
	 *   bpf_{list_head,rb_root}. Let's call these root types.
	 *
	 * We ensure that if a type is both a root and a node, its
	 * element types cannot be root types.
	 *
	 * To ensure acyclicity:
	 *
	 * When A is a root type but not a node, its ownership
	 * chain can be:
	 *	A -> B -> C
	 * Where:
	 * - A is a root, e.g. has bpf_rb_root.
	 * - B is both a root and a node, e.g. has bpf_rb_node and
	 *   bpf_list_head.
	 * - C is only a node, e.g. has bpf_list_node.
	 *
	 * When A is both a root and a node, some other type already
	 * owns it in the BTF domain, hence it cannot own
	 * another root type through any of the ownership edges.
	 *	A -> B
	 * Where:
	 * - A is both a root and a node.
	 * - B is only a node.
4152 | */ |
4153 | if (meta->record->field_mask & BPF_GRAPH_ROOT) |
4154 | return -ELOOP; |
4155 | } |
4156 | return 0; |
4157 | } |
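
/* Example (illustrative): the following pair of local types is rejected
 * with -ELOOP by the check above, since each type is both a root and a
 * node while its owned value type is again a root, which would permit an
 * A -> B -> A ownership cycle:
 *
 *	struct A { struct bpf_list_node n;
 *		   struct bpf_list_head h __contains(B, n); };
 *	struct B { struct bpf_list_node n;
 *		   struct bpf_list_head h __contains(A, n); };
 */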
4158 | |
4159 | static void __btf_struct_show(const struct btf *btf, const struct btf_type *t, |
4160 | u32 type_id, void *data, u8 bits_offset, |
4161 | struct btf_show *show) |
4162 | { |
4163 | const struct btf_member *member; |
4164 | void *safe_data; |
4165 | u32 i; |
4166 | |
4167 | safe_data = btf_show_start_struct_type(show, t, type_id, data); |
4168 | if (!safe_data) |
4169 | return; |
4170 | |
4171 | for_each_member(i, t, member) { |
4172 | const struct btf_type *member_type = btf_type_by_id(btf, |
4173 | member->type); |
4174 | const struct btf_kind_operations *ops; |
4175 | u32 member_offset, bitfield_size; |
4176 | u32 bytes_offset; |
4177 | u8 bits8_offset; |
4178 | |
		btf_show_start_member(show, member);

		member_offset = __btf_member_bit_offset(t, member);
		bitfield_size = __btf_member_bitfield_size(t, member);
		bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
		bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
		if (bitfield_size) {
			safe_data = btf_show_start_type(show, member_type,
							member->type,
							data + bytes_offset);
			if (safe_data)
				btf_bitfield_show(safe_data,
						  bits8_offset,
						  bitfield_size, show);
			btf_show_end_type(show);
		} else {
			ops = btf_type_ops(member_type);
4196 | ops->show(btf, member_type, member->type, |
4197 | data + bytes_offset, bits8_offset, show); |
4198 | } |
4199 | |
4200 | btf_show_end_member(show); |
4201 | } |
4202 | |
4203 | btf_show_end_struct_type(show); |
4204 | } |
4205 | |
4206 | static void btf_struct_show(const struct btf *btf, const struct btf_type *t, |
4207 | u32 type_id, void *data, u8 bits_offset, |
4208 | struct btf_show *show) |
4209 | { |
4210 | const struct btf_member *m = show->state.member; |
4211 | |
4212 | /* |
4213 | * First check if any members would be shown (are non-zero). |
4214 | * See comments above "struct btf_show" definition for more |
4215 | * details on how this works at a high-level. |
4216 | */ |
4217 | if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) { |
4218 | if (!show->state.depth_check) { |
4219 | show->state.depth_check = show->state.depth + 1; |
4220 | show->state.depth_to_show = 0; |
4221 | } |
4222 | __btf_struct_show(btf, t, type_id, data, bits_offset, show); |
4223 | /* Restore saved member data here */ |
4224 | show->state.member = m; |
4225 | if (show->state.depth_check != show->state.depth + 1) |
4226 | return; |
4227 | show->state.depth_check = 0; |
4228 | |
4229 | if (show->state.depth_to_show <= show->state.depth) |
4230 | return; |
4231 | /* |
4232 | * Reaching here indicates we have recursed and found |
4233 | * non-zero child values. |
4234 | */ |
4235 | } |
4236 | |
4237 | __btf_struct_show(btf, t, type_id, data, bits_offset, show); |
4238 | } |
4239 | |
4240 | static const struct btf_kind_operations struct_ops = { |
4241 | .check_meta = btf_struct_check_meta, |
4242 | .resolve = btf_struct_resolve, |
4243 | .check_member = btf_struct_check_member, |
4244 | .check_kflag_member = btf_generic_check_kflag_member, |
4245 | .log_details = btf_struct_log, |
4246 | .show = btf_struct_show, |
4247 | }; |
4248 | |
4249 | static int btf_enum_check_member(struct btf_verifier_env *env, |
4250 | const struct btf_type *struct_type, |
4251 | const struct btf_member *member, |
4252 | const struct btf_type *member_type) |
4253 | { |
4254 | u32 struct_bits_off = member->offset; |
4255 | u32 struct_size, bytes_offset; |
4256 | |
4257 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
4258 | btf_verifier_log_member(env, struct_type, member, |
4259 | fmt: "Member is not byte aligned"); |
4260 | return -EINVAL; |
4261 | } |
4262 | |
4263 | struct_size = struct_type->size; |
4264 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
4265 | if (struct_size - bytes_offset < member_type->size) { |
4266 | btf_verifier_log_member(env, struct_type, member, |
4267 | fmt: "Member exceeds struct_size"); |
4268 | return -EINVAL; |
4269 | } |
4270 | |
4271 | return 0; |
4272 | } |
4273 | |
4274 | static int btf_enum_check_kflag_member(struct btf_verifier_env *env, |
4275 | const struct btf_type *struct_type, |
4276 | const struct btf_member *member, |
4277 | const struct btf_type *member_type) |
4278 | { |
4279 | u32 struct_bits_off, nr_bits, bytes_end, struct_size; |
4280 | u32 int_bitsize = sizeof(int) * BITS_PER_BYTE; |
4281 | |
4282 | struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); |
4283 | nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); |
4284 | if (!nr_bits) { |
4285 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
4286 | btf_verifier_log_member(env, struct_type, member, |
4287 | fmt: "Member is not byte aligned"); |
4288 | return -EINVAL; |
4289 | } |
4290 | |
4291 | nr_bits = int_bitsize; |
4292 | } else if (nr_bits > int_bitsize) { |
4293 | btf_verifier_log_member(env, struct_type, member, |
4294 | fmt: "Invalid member bitfield_size"); |
4295 | return -EINVAL; |
4296 | } |
4297 | |
4298 | struct_size = struct_type->size; |
4299 | bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits); |
4300 | if (struct_size < bytes_end) { |
4301 | btf_verifier_log_member(env, struct_type, member, |
4302 | fmt: "Member exceeds struct_size"); |
4303 | return -EINVAL; |
4304 | } |
4305 | |
4306 | return 0; |
4307 | } |
4308 | |
4309 | static s32 btf_enum_check_meta(struct btf_verifier_env *env, |
4310 | const struct btf_type *t, |
4311 | u32 meta_left) |
4312 | { |
4313 | const struct btf_enum *enums = btf_type_enum(t); |
4314 | struct btf *btf = env->btf; |
4315 | const char *fmt_str; |
4316 | u16 i, nr_enums; |
4317 | u32 meta_needed; |
4318 | |
4319 | nr_enums = btf_type_vlen(t); |
4320 | meta_needed = nr_enums * sizeof(*enums); |
4321 | |
4322 | if (meta_left < meta_needed) { |
4323 | btf_verifier_log_basic(env, t, |
4324 | "meta_left:%u meta_needed:%u", |
4325 | meta_left, meta_needed); |
4326 | return -EINVAL; |
4327 | } |
4328 | |
	if (t->size > 8 || !is_power_of_2(t->size)) {
4330 | btf_verifier_log_type(env, t, "Unexpected size"); |
4331 | return -EINVAL; |
4332 | } |
4333 | |
4334 | /* enum type either no name or a valid one */ |
	if (t->name_off &&
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4337 | btf_verifier_log_type(env, t, "Invalid name"); |
4338 | return -EINVAL; |
4339 | } |
4340 | |
4341 | btf_verifier_log_type(env, t, NULL); |
4342 | |
4343 | for (i = 0; i < nr_enums; i++) { |
		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
			btf_verifier_log(env, "\tInvalid name_offset:%u",
					 enums[i].name_off);
4347 | return -EINVAL; |
4348 | } |
4349 | |
4350 | /* enum member must have a valid name */ |
		if (!enums[i].name_off ||
		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
4353 | btf_verifier_log_type(env, t, "Invalid name"); |
4354 | return -EINVAL; |
4355 | } |
4356 | |
4357 | if (env->log.level == BPF_LOG_KERNEL) |
4358 | continue; |
		fmt_str = btf_type_kflag(t) ? "\t%s val=%d\n" : "\t%s val=%u\n";
		btf_verifier_log(env, fmt_str,
				 __btf_name_by_offset(btf, enums[i].name_off),
				 enums[i].val);
4363 | } |
4364 | |
4365 | return meta_needed; |
4366 | } |
4367 | |
4368 | static void btf_enum_log(struct btf_verifier_env *env, |
4369 | const struct btf_type *t) |
4370 | { |
	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4372 | } |
4373 | |
4374 | static void btf_enum_show(const struct btf *btf, const struct btf_type *t, |
4375 | u32 type_id, void *data, u8 bits_offset, |
4376 | struct btf_show *show) |
4377 | { |
4378 | const struct btf_enum *enums = btf_type_enum(t); |
4379 | u32 i, nr_enums = btf_type_vlen(t); |
4380 | void *safe_data; |
4381 | int v; |
4382 | |
4383 | safe_data = btf_show_start_type(show, t, type_id, data); |
4384 | if (!safe_data) |
4385 | return; |
4386 | |
4387 | v = *(int *)safe_data; |
4388 | |
4389 | for (i = 0; i < nr_enums; i++) { |
4390 | if (v != enums[i].val) |
4391 | continue; |
4392 | |
4393 | btf_show_type_value(show, "%s", |
4394 | __btf_name_by_offset(btf, |
4395 | enums[i].name_off)); |
4396 | |
4397 | btf_show_end_type(show); |
4398 | return; |
4399 | } |
4400 | |
4401 | if (btf_type_kflag(t)) |
4402 | btf_show_type_value(show, "%d", v); |
4403 | else |
4404 | btf_show_type_value(show, "%u", v); |
4405 | btf_show_end_type(show); |
4406 | } |
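
/* Example (illustrative): given 'enum E { FIRST = 1, SECOND = 2 };', a
 * value of 1 is printed as "FIRST", while a value such as 7 that matches
 * no enumerator falls back to the numeric form, signed ("%d") or unsigned
 * ("%u") depending on the enum's kind_flag.
 */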
4407 | |
4408 | static const struct btf_kind_operations enum_ops = { |
4409 | .check_meta = btf_enum_check_meta, |
4410 | .resolve = btf_df_resolve, |
4411 | .check_member = btf_enum_check_member, |
4412 | .check_kflag_member = btf_enum_check_kflag_member, |
4413 | .log_details = btf_enum_log, |
4414 | .show = btf_enum_show, |
4415 | }; |
4416 | |
4417 | static s32 btf_enum64_check_meta(struct btf_verifier_env *env, |
4418 | const struct btf_type *t, |
4419 | u32 meta_left) |
4420 | { |
4421 | const struct btf_enum64 *enums = btf_type_enum64(t); |
4422 | struct btf *btf = env->btf; |
4423 | const char *fmt_str; |
4424 | u16 i, nr_enums; |
4425 | u32 meta_needed; |
4426 | |
4427 | nr_enums = btf_type_vlen(t); |
4428 | meta_needed = nr_enums * sizeof(*enums); |
4429 | |
4430 | if (meta_left < meta_needed) { |
4431 | btf_verifier_log_basic(env, t, |
4432 | "meta_left:%u meta_needed:%u", |
4433 | meta_left, meta_needed); |
4434 | return -EINVAL; |
4435 | } |
4436 | |
	if (t->size > 8 || !is_power_of_2(t->size)) {
4438 | btf_verifier_log_type(env, t, "Unexpected size"); |
4439 | return -EINVAL; |
4440 | } |
4441 | |
4442 | /* enum type either no name or a valid one */ |
	if (t->name_off &&
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4445 | btf_verifier_log_type(env, t, "Invalid name"); |
4446 | return -EINVAL; |
4447 | } |
4448 | |
4449 | btf_verifier_log_type(env, t, NULL); |
4450 | |
4451 | for (i = 0; i < nr_enums; i++) { |
		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
			btf_verifier_log(env, "\tInvalid name_offset:%u",
					 enums[i].name_off);
4455 | return -EINVAL; |
4456 | } |
4457 | |
4458 | /* enum member must have a valid name */ |
		if (!enums[i].name_off ||
		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
4461 | btf_verifier_log_type(env, t, "Invalid name"); |
4462 | return -EINVAL; |
4463 | } |
4464 | |
4465 | if (env->log.level == BPF_LOG_KERNEL) |
4466 | continue; |
4467 | |
		fmt_str = btf_type_kflag(t) ? "\t%s val=%lld\n" : "\t%s val=%llu\n";
		btf_verifier_log(env, fmt_str,
				 __btf_name_by_offset(btf, enums[i].name_off),
				 btf_enum64_value(enums + i));
4472 | } |
4473 | |
4474 | return meta_needed; |
4475 | } |
4476 | |
4477 | static void btf_enum64_show(const struct btf *btf, const struct btf_type *t, |
4478 | u32 type_id, void *data, u8 bits_offset, |
4479 | struct btf_show *show) |
4480 | { |
4481 | const struct btf_enum64 *enums = btf_type_enum64(t); |
4482 | u32 i, nr_enums = btf_type_vlen(t); |
4483 | void *safe_data; |
4484 | s64 v; |
4485 | |
4486 | safe_data = btf_show_start_type(show, t, type_id, data); |
4487 | if (!safe_data) |
4488 | return; |
4489 | |
4490 | v = *(u64 *)safe_data; |
4491 | |
4492 | for (i = 0; i < nr_enums; i++) { |
		if (v != btf_enum64_value(enums + i))
4494 | continue; |
4495 | |
4496 | btf_show_type_value(show, "%s", |
4497 | __btf_name_by_offset(btf, |
4498 | enums[i].name_off)); |
4499 | |
4500 | btf_show_end_type(show); |
4501 | return; |
4502 | } |
4503 | |
4504 | if (btf_type_kflag(t)) |
4505 | btf_show_type_value(show, "%lld", v); |
4506 | else |
4507 | btf_show_type_value(show, "%llu", v); |
4508 | btf_show_end_type(show); |
4509 | } |
4510 | |
4511 | static const struct btf_kind_operations enum64_ops = { |
4512 | .check_meta = btf_enum64_check_meta, |
4513 | .resolve = btf_df_resolve, |
4514 | .check_member = btf_enum_check_member, |
4515 | .check_kflag_member = btf_enum_check_kflag_member, |
4516 | .log_details = btf_enum_log, |
4517 | .show = btf_enum64_show, |
4518 | }; |
4519 | |
4520 | static s32 btf_func_proto_check_meta(struct btf_verifier_env *env, |
4521 | const struct btf_type *t, |
4522 | u32 meta_left) |
4523 | { |
4524 | u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param); |
4525 | |
4526 | if (meta_left < meta_needed) { |
4527 | btf_verifier_log_basic(env, t, |
4528 | "meta_left:%u meta_needed:%u", |
4529 | meta_left, meta_needed); |
4530 | return -EINVAL; |
4531 | } |
4532 | |
4533 | if (t->name_off) { |
4534 | btf_verifier_log_type(env, t, "Invalid name"); |
4535 | return -EINVAL; |
4536 | } |
4537 | |
4538 | if (btf_type_kflag(t)) { |
4539 | btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); |
4540 | return -EINVAL; |
4541 | } |
4542 | |
4543 | btf_verifier_log_type(env, t, NULL); |
4544 | |
4545 | return meta_needed; |
4546 | } |
4547 | |
4548 | static void btf_func_proto_log(struct btf_verifier_env *env, |
4549 | const struct btf_type *t) |
4550 | { |
4551 | const struct btf_param *args = (const struct btf_param *)(t + 1); |
4552 | u16 nr_args = btf_type_vlen(t), i; |
4553 | |
4554 | btf_verifier_log(env, fmt: "return=%u args=(", t->type); |
4555 | if (!nr_args) { |
4556 | btf_verifier_log(env, fmt: "void"); |
4557 | goto done; |
4558 | } |
4559 | |
4560 | if (nr_args == 1 && !args[0].type) { |
4561 | /* Only one vararg */ |
4562 | btf_verifier_log(env, fmt: "vararg"); |
4563 | goto done; |
4564 | } |
4565 | |
4566 | btf_verifier_log(env, fmt: "%u %s", args[0].type, |
4567 | __btf_name_by_offset(btf: env->btf, |
4568 | offset: args[0].name_off)); |
4569 | for (i = 1; i < nr_args - 1; i++) |
4570 | btf_verifier_log(env, fmt: ", %u %s", args[i].type, |
4571 | __btf_name_by_offset(btf: env->btf, |
4572 | offset: args[i].name_off)); |
4573 | |
4574 | if (nr_args > 1) { |
4575 | const struct btf_param *last_arg = &args[nr_args - 1]; |
4576 | |
4577 | if (last_arg->type) |
4578 | btf_verifier_log(env, fmt: ", %u %s", last_arg->type, |
4579 | __btf_name_by_offset(btf: env->btf, |
4580 | offset: last_arg->name_off)); |
4581 | else |
4582 | btf_verifier_log(env, fmt: ", vararg"); |
4583 | } |
4584 | |
4585 | done: |
4586 | btf_verifier_log(env, fmt: ")"); |
4587 | } |
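
/* Example (illustrative): for a prototype like 'int foo(int a, ...)', the
 * log emitted above reads "return=2 args=(1 a, vararg)", where the numbers
 * are the BTF type_ids of the return and parameter types.
 */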
4588 | |
4589 | static const struct btf_kind_operations func_proto_ops = { |
4590 | .check_meta = btf_func_proto_check_meta, |
4591 | .resolve = btf_df_resolve, |
4592 | /* |
4593 | * BTF_KIND_FUNC_PROTO cannot be directly referred by |
4594 | * a struct's member. |
4595 | * |
4596 | * It should be a function pointer instead. |
4597 | * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO) |
4598 | * |
4599 | * Hence, there is no btf_func_check_member(). |
4600 | */ |
4601 | .check_member = btf_df_check_member, |
4602 | .check_kflag_member = btf_df_check_kflag_member, |
4603 | .log_details = btf_func_proto_log, |
4604 | .show = btf_df_show, |
4605 | }; |
4606 | |
4607 | static s32 btf_func_check_meta(struct btf_verifier_env *env, |
4608 | const struct btf_type *t, |
4609 | u32 meta_left) |
4610 | { |
	if (!t->name_off ||
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4613 | btf_verifier_log_type(env, t, "Invalid name"); |
4614 | return -EINVAL; |
4615 | } |
4616 | |
4617 | if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) { |
4618 | btf_verifier_log_type(env, t, "Invalid func linkage"); |
4619 | return -EINVAL; |
4620 | } |
4621 | |
4622 | if (btf_type_kflag(t)) { |
4623 | btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); |
4624 | return -EINVAL; |
4625 | } |
4626 | |
4627 | btf_verifier_log_type(env, t, NULL); |
4628 | |
4629 | return 0; |
4630 | } |
4631 | |
4632 | static int btf_func_resolve(struct btf_verifier_env *env, |
4633 | const struct resolve_vertex *v) |
4634 | { |
4635 | const struct btf_type *t = v->t; |
4636 | u32 next_type_id = t->type; |
4637 | int err; |
4638 | |
4639 | err = btf_func_check(env, t); |
4640 | if (err) |
4641 | return err; |
4642 | |
	env_stack_pop_resolved(env, next_type_id, 0);
4644 | return 0; |
4645 | } |
4646 | |
4647 | static const struct btf_kind_operations func_ops = { |
4648 | .check_meta = btf_func_check_meta, |
4649 | .resolve = btf_func_resolve, |
4650 | .check_member = btf_df_check_member, |
4651 | .check_kflag_member = btf_df_check_kflag_member, |
4652 | .log_details = btf_ref_type_log, |
4653 | .show = btf_df_show, |
4654 | }; |
4655 | |
4656 | static s32 btf_var_check_meta(struct btf_verifier_env *env, |
4657 | const struct btf_type *t, |
4658 | u32 meta_left) |
4659 | { |
4660 | const struct btf_var *var; |
4661 | u32 meta_needed = sizeof(*var); |
4662 | |
4663 | if (meta_left < meta_needed) { |
4664 | btf_verifier_log_basic(env, t, |
4665 | "meta_left:%u meta_needed:%u", |
4666 | meta_left, meta_needed); |
4667 | return -EINVAL; |
4668 | } |
4669 | |
4670 | if (btf_type_vlen(t)) { |
4671 | btf_verifier_log_type(env, t, "vlen != 0"); |
4672 | return -EINVAL; |
4673 | } |
4674 | |
4675 | if (btf_type_kflag(t)) { |
4676 | btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); |
4677 | return -EINVAL; |
4678 | } |
4679 | |
	if (!t->name_off ||
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
4682 | btf_verifier_log_type(env, t, "Invalid name"); |
4683 | return -EINVAL; |
4684 | } |
4685 | |
4686 | /* A var cannot be in type void */ |
4687 | if (!t->type || !BTF_TYPE_ID_VALID(t->type)) { |
4688 | btf_verifier_log_type(env, t, "Invalid type_id"); |
4689 | return -EINVAL; |
4690 | } |
4691 | |
4692 | var = btf_type_var(t); |
4693 | if (var->linkage != BTF_VAR_STATIC && |
4694 | var->linkage != BTF_VAR_GLOBAL_ALLOCATED) { |
4695 | btf_verifier_log_type(env, t, "Linkage not supported"); |
4696 | return -EINVAL; |
4697 | } |
4698 | |
4699 | btf_verifier_log_type(env, t, NULL); |
4700 | |
4701 | return meta_needed; |
4702 | } |
4703 | |
4704 | static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t) |
4705 | { |
4706 | const struct btf_var *var = btf_type_var(t); |
4707 | |
	btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
4709 | } |
4710 | |
4711 | static const struct btf_kind_operations var_ops = { |
4712 | .check_meta = btf_var_check_meta, |
4713 | .resolve = btf_var_resolve, |
4714 | .check_member = btf_df_check_member, |
4715 | .check_kflag_member = btf_df_check_kflag_member, |
4716 | .log_details = btf_var_log, |
4717 | .show = btf_var_show, |
4718 | }; |
4719 | |
4720 | static s32 btf_datasec_check_meta(struct btf_verifier_env *env, |
4721 | const struct btf_type *t, |
4722 | u32 meta_left) |
4723 | { |
4724 | const struct btf_var_secinfo *vsi; |
4725 | u64 last_vsi_end_off = 0, sum = 0; |
4726 | u32 i, meta_needed; |
4727 | |
4728 | meta_needed = btf_type_vlen(t) * sizeof(*vsi); |
4729 | if (meta_left < meta_needed) { |
4730 | btf_verifier_log_basic(env, t, |
4731 | "meta_left:%u meta_needed:%u", |
4732 | meta_left, meta_needed); |
4733 | return -EINVAL; |
4734 | } |
4735 | |
4736 | if (!t->size) { |
4737 | btf_verifier_log_type(env, t, "size == 0"); |
4738 | return -EINVAL; |
4739 | } |
4740 | |
4741 | if (btf_type_kflag(t)) { |
4742 | btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); |
4743 | return -EINVAL; |
4744 | } |
4745 | |
	if (!t->name_off ||
	    !btf_name_valid_section(env->btf, t->name_off)) {
4748 | btf_verifier_log_type(env, t, "Invalid name"); |
4749 | return -EINVAL; |
4750 | } |
4751 | |
4752 | btf_verifier_log_type(env, t, NULL); |
4753 | |
	for_each_vsi(i, t, vsi) {
		/* A var cannot be in type void */
		if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid type_id");
			return -EINVAL;
		}

		if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid offset");
			return -EINVAL;
		}

		if (!vsi->size || vsi->size > t->size) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid size");
			return -EINVAL;
		}

		last_vsi_end_off = vsi->offset + vsi->size;
		if (last_vsi_end_off > t->size) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid offset+size");
			return -EINVAL;
		}

		btf_verifier_log_vsi(env, t, vsi, NULL);
		sum += vsi->size;
4783 | } |
4784 | |
4785 | if (t->size < sum) { |
4786 | btf_verifier_log_type(env, t, "Invalid btf_info size"); |
4787 | return -EINVAL; |
4788 | } |
4789 | |
4790 | return meta_needed; |
4791 | } |
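
/* Example (illustrative): a DATASEC named ".data" with t->size == 16 and
 * two VAR secinfos (offset=0, size=8) and (offset=8, size=8) passes the
 * checks above; entries that overlap, start past t->size, or whose sizes
 * sum to more than t->size are rejected.
 */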
4792 | |
4793 | static int btf_datasec_resolve(struct btf_verifier_env *env, |
4794 | const struct resolve_vertex *v) |
4795 | { |
4796 | const struct btf_var_secinfo *vsi; |
4797 | struct btf *btf = env->btf; |
4798 | u16 i; |
4799 | |
4800 | env->resolve_mode = RESOLVE_TBD; |
4801 | for_each_vsi_from(i, v->next_member, v->t, vsi) { |
4802 | u32 var_type_id = vsi->type, type_id, type_size = 0; |
4803 | const struct btf_type *var_type = btf_type_by_id(env->btf, |
4804 | var_type_id); |
		if (!var_type || !btf_type_is_var(var_type)) {
			btf_verifier_log_vsi(env, v->t, vsi,
					     "Not a VAR kind member");
			return -EINVAL;
		}

		if (!env_type_is_resolve_sink(env, var_type) &&
		    !env_type_is_resolved(env, var_type_id)) {
			env_stack_set_next_member(env, i + 1);
			return env_stack_push(env, var_type, var_type_id);
		}

		type_id = var_type->type;
		if (!btf_type_id_size(btf, &type_id, &type_size)) {
			btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
			return -EINVAL;
		}

		if (vsi->size < type_size) {
			btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, 0, 0);
4830 | return 0; |
4831 | } |
4832 | |
4833 | static void btf_datasec_log(struct btf_verifier_env *env, |
4834 | const struct btf_type *t) |
4835 | { |
	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
4837 | } |
4838 | |
4839 | static void btf_datasec_show(const struct btf *btf, |
4840 | const struct btf_type *t, u32 type_id, |
4841 | void *data, u8 bits_offset, |
4842 | struct btf_show *show) |
4843 | { |
4844 | const struct btf_var_secinfo *vsi; |
4845 | const struct btf_type *var; |
4846 | u32 i; |
4847 | |
4848 | if (!btf_show_start_type(show, t, type_id, data)) |
4849 | return; |
4850 | |
4851 | btf_show_type_value(show, "section (\"%s\") = {", |
4852 | __btf_name_by_offset(btf, t->name_off)); |
4853 | for_each_vsi(i, t, vsi) { |
4854 | var = btf_type_by_id(btf, vsi->type); |
4855 | if (i) |
			btf_show(show, ",");
		btf_type_ops(var)->show(btf, var, vsi->type,
					data + vsi->offset, bits_offset, show);
4859 | } |
4860 | btf_show_end_type(show); |
4861 | } |
4862 | |
4863 | static const struct btf_kind_operations datasec_ops = { |
4864 | .check_meta = btf_datasec_check_meta, |
4865 | .resolve = btf_datasec_resolve, |
4866 | .check_member = btf_df_check_member, |
4867 | .check_kflag_member = btf_df_check_kflag_member, |
4868 | .log_details = btf_datasec_log, |
4869 | .show = btf_datasec_show, |
4870 | }; |
4871 | |
4872 | static s32 btf_float_check_meta(struct btf_verifier_env *env, |
4873 | const struct btf_type *t, |
4874 | u32 meta_left) |
4875 | { |
4876 | if (btf_type_vlen(t)) { |
4877 | btf_verifier_log_type(env, t, "vlen != 0"); |
4878 | return -EINVAL; |
4879 | } |
4880 | |
4881 | if (btf_type_kflag(t)) { |
4882 | btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); |
4883 | return -EINVAL; |
4884 | } |
4885 | |
4886 | if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 && |
4887 | t->size != 16) { |
4888 | btf_verifier_log_type(env, t, "Invalid type_size"); |
4889 | return -EINVAL; |
4890 | } |
4891 | |
4892 | btf_verifier_log_type(env, t, NULL); |
4893 | |
4894 | return 0; |
4895 | } |
4896 | |
4897 | static int btf_float_check_member(struct btf_verifier_env *env, |
4898 | const struct btf_type *struct_type, |
4899 | const struct btf_member *member, |
4900 | const struct btf_type *member_type) |
4901 | { |
4902 | u64 start_offset_bytes; |
4903 | u64 end_offset_bytes; |
4904 | u64 misalign_bits; |
4905 | u64 align_bytes; |
4906 | u64 align_bits; |
4907 | |
4908 | /* Different architectures have different alignment requirements, so |
4909 | * here we check only for the reasonable minimum. This way we ensure |
4910 | * that types after CO-RE can pass the kernel BTF verifier. |
4911 | */ |
4912 | align_bytes = min_t(u64, sizeof(void *), member_type->size); |
4913 | align_bits = align_bytes * BITS_PER_BYTE; |
	div64_u64_rem(member->offset, align_bits, &misalign_bits);
4915 | if (misalign_bits) { |
4916 | btf_verifier_log_member(env, struct_type, member, |
4917 | fmt: "Member is not properly aligned"); |
4918 | return -EINVAL; |
4919 | } |
4920 | |
4921 | start_offset_bytes = member->offset / BITS_PER_BYTE; |
4922 | end_offset_bytes = start_offset_bytes + member_type->size; |
4923 | if (end_offset_bytes > struct_type->size) { |
4924 | btf_verifier_log_member(env, struct_type, member, |
4925 | fmt: "Member exceeds struct_size"); |
4926 | return -EINVAL; |
4927 | } |
4928 | |
4929 | return 0; |
4930 | } |
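
/* Example (illustrative): on a 64-bit kernel, a 'double' member at byte
 * offset 4 of a packed struct fails the div64_u64_rem() check above
 * ("Member is not properly aligned"), since min(sizeof(void *), 8) == 8
 * bytes of alignment is the minimum enforced here.
 */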
4931 | |
4932 | static void btf_float_log(struct btf_verifier_env *env, |
4933 | const struct btf_type *t) |
4934 | { |
4935 | btf_verifier_log(env, fmt: "size=%u", t->size); |
4936 | } |
4937 | |
4938 | static const struct btf_kind_operations float_ops = { |
4939 | .check_meta = btf_float_check_meta, |
4940 | .resolve = btf_df_resolve, |
4941 | .check_member = btf_float_check_member, |
4942 | .check_kflag_member = btf_generic_check_kflag_member, |
4943 | .log_details = btf_float_log, |
4944 | .show = btf_df_show, |
4945 | }; |
4946 | |
4947 | static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env, |
4948 | const struct btf_type *t, |
4949 | u32 meta_left) |
4950 | { |
4951 | const struct btf_decl_tag *tag; |
4952 | u32 meta_needed = sizeof(*tag); |
4953 | s32 component_idx; |
4954 | const char *value; |
4955 | |
4956 | if (meta_left < meta_needed) { |
4957 | btf_verifier_log_basic(env, t, |
4958 | "meta_left:%u meta_needed:%u", |
4959 | meta_left, meta_needed); |
4960 | return -EINVAL; |
4961 | } |
4962 | |
	value = btf_name_by_offset(env->btf, t->name_off);
4964 | if (!value || !value[0]) { |
4965 | btf_verifier_log_type(env, t, "Invalid value"); |
4966 | return -EINVAL; |
4967 | } |
4968 | |
4969 | if (btf_type_vlen(t)) { |
4970 | btf_verifier_log_type(env, t, "vlen != 0"); |
4971 | return -EINVAL; |
4972 | } |
4973 | |
4974 | component_idx = btf_type_decl_tag(t)->component_idx; |
4975 | if (component_idx < -1) { |
4976 | btf_verifier_log_type(env, t, "Invalid component_idx"); |
4977 | return -EINVAL; |
4978 | } |
4979 | |
4980 | btf_verifier_log_type(env, t, NULL); |
4981 | |
4982 | return meta_needed; |
4983 | } |
4984 | |
4985 | static int btf_decl_tag_resolve(struct btf_verifier_env *env, |
4986 | const struct resolve_vertex *v) |
4987 | { |
4988 | const struct btf_type *next_type; |
4989 | const struct btf_type *t = v->t; |
4990 | u32 next_type_id = t->type; |
4991 | struct btf *btf = env->btf; |
4992 | s32 component_idx; |
4993 | u32 vlen; |
4994 | |
4995 | next_type = btf_type_by_id(btf, next_type_id); |
	if (!next_type || !btf_type_is_decl_tag_target(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	component_idx = btf_type_decl_tag(t)->component_idx;
	if (component_idx != -1) {
		if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) {
5008 | btf_verifier_log_type(env, v->t, "Invalid component_idx"); |
5009 | return -EINVAL; |
5010 | } |
5011 | |
		if (btf_type_is_struct(next_type)) {
			vlen = btf_type_vlen(next_type);
		} else {
			/* next_type should be a function */
			next_type = btf_type_by_id(btf, next_type->type);
			vlen = btf_type_vlen(next_type);
		}

		if ((u32)component_idx >= vlen) {
			btf_verifier_log_type(env, v->t, "Invalid component_idx");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, next_type_id, 0);
5027 | |
5028 | return 0; |
5029 | } |
5030 | |
5031 | static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t) |
5032 | { |
	btf_verifier_log(env, "type=%u component_idx=%d", t->type,
5034 | btf_type_decl_tag(t)->component_idx); |
5035 | } |
5036 | |
5037 | static const struct btf_kind_operations decl_tag_ops = { |
5038 | .check_meta = btf_decl_tag_check_meta, |
5039 | .resolve = btf_decl_tag_resolve, |
5040 | .check_member = btf_df_check_member, |
5041 | .check_kflag_member = btf_df_check_kflag_member, |
5042 | .log_details = btf_decl_tag_log, |
5043 | .show = btf_df_show, |
5044 | }; |
5045 | |
5046 | static int btf_func_proto_check(struct btf_verifier_env *env, |
5047 | const struct btf_type *t) |
5048 | { |
5049 | const struct btf_type *ret_type; |
5050 | const struct btf_param *args; |
5051 | const struct btf *btf; |
5052 | u16 nr_args, i; |
5053 | int err; |
5054 | |
5055 | btf = env->btf; |
5056 | args = (const struct btf_param *)(t + 1); |
5057 | nr_args = btf_type_vlen(t); |
5058 | |
5059 | /* Check func return type which could be "void" (t->type == 0) */ |
5060 | if (t->type) { |
5061 | u32 ret_type_id = t->type; |
5062 | |
5063 | ret_type = btf_type_by_id(btf, ret_type_id); |
5064 | if (!ret_type) { |
5065 | btf_verifier_log_type(env, t, "Invalid return type"); |
5066 | return -EINVAL; |
5067 | } |
5068 | |
		if (btf_type_is_resolve_source_only(ret_type)) {
			btf_verifier_log_type(env, t, "Invalid return type");
			return -EINVAL;
		}

		if (btf_type_needs_resolve(ret_type) &&
		    !env_type_is_resolved(env, ret_type_id)) {
			err = btf_resolve(env, ret_type, ret_type_id);
			if (err)
				return err;
		}

		/* Ensure the return type is a type that has a size */
		if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
5083 | btf_verifier_log_type(env, t, "Invalid return type"); |
5084 | return -EINVAL; |
5085 | } |
5086 | } |
5087 | |
5088 | if (!nr_args) |
5089 | return 0; |
5090 | |
5091 | /* Last func arg type_id could be 0 if it is a vararg */ |
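	/* e.g. a prototype like int (*)(const char *fmt, ...) is encoded
	 * with a final btf_param whose type and name_off are both 0.
	 */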
5092 | if (!args[nr_args - 1].type) { |
5093 | if (args[nr_args - 1].name_off) { |
5094 | btf_verifier_log_type(env, t, "Invalid arg#%u", |
5095 | nr_args); |
5096 | return -EINVAL; |
5097 | } |
5098 | nr_args--; |
5099 | } |
5100 | |
5101 | for (i = 0; i < nr_args; i++) { |
5102 | const struct btf_type *arg_type; |
5103 | u32 arg_type_id; |
5104 | |
5105 | arg_type_id = args[i].type; |
5106 | arg_type = btf_type_by_id(btf, arg_type_id); |
5107 | if (!arg_type) { |
5108 | btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); |
5109 | return -EINVAL; |
5110 | } |
5111 | |
		if (btf_type_is_resolve_source_only(arg_type)) {
			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
			return -EINVAL;
		}

		if (args[i].name_off &&
		    (!btf_name_offset_valid(btf, args[i].name_off) ||
		     !btf_name_valid_identifier(btf, args[i].name_off))) {
			btf_verifier_log_type(env, t,
					      "Invalid arg#%u", i + 1);
			return -EINVAL;
		}

		if (btf_type_needs_resolve(arg_type) &&
		    !env_type_is_resolved(env, arg_type_id)) {
			err = btf_resolve(env, arg_type, arg_type_id);
			if (err)
				return err;
		}

		if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
5133 | btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); |
5134 | return -EINVAL; |
5135 | } |
5136 | } |
5137 | |
5138 | return 0; |
5139 | } |
5140 | |
5141 | static int btf_func_check(struct btf_verifier_env *env, |
5142 | const struct btf_type *t) |
5143 | { |
5144 | const struct btf_type *proto_type; |
5145 | const struct btf_param *args; |
5146 | const struct btf *btf; |
5147 | u16 nr_args, i; |
5148 | |
5149 | btf = env->btf; |
5150 | proto_type = btf_type_by_id(btf, t->type); |
5151 | |
	if (!proto_type || !btf_type_is_func_proto(proto_type)) {
		btf_verifier_log_type(env, t, "Invalid type_id");
		return -EINVAL;
	}

	args = (const struct btf_param *)(proto_type + 1);
	nr_args = btf_type_vlen(proto_type);
5159 | for (i = 0; i < nr_args; i++) { |
5160 | if (!args[i].name_off && args[i].type) { |
5161 | btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); |
5162 | return -EINVAL; |
5163 | } |
5164 | } |
5165 | |
5166 | return 0; |
5167 | } |
5168 | |
5169 | static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = { |
5170 | [BTF_KIND_INT] = &int_ops, |
5171 | [BTF_KIND_PTR] = &ptr_ops, |
5172 | [BTF_KIND_ARRAY] = &array_ops, |
5173 | [BTF_KIND_STRUCT] = &struct_ops, |
5174 | [BTF_KIND_UNION] = &struct_ops, |
5175 | [BTF_KIND_ENUM] = &enum_ops, |
5176 | [BTF_KIND_FWD] = &fwd_ops, |
5177 | [BTF_KIND_TYPEDEF] = &modifier_ops, |
5178 | [BTF_KIND_VOLATILE] = &modifier_ops, |
5179 | [BTF_KIND_CONST] = &modifier_ops, |
5180 | [BTF_KIND_RESTRICT] = &modifier_ops, |
5181 | [BTF_KIND_FUNC] = &func_ops, |
5182 | [BTF_KIND_FUNC_PROTO] = &func_proto_ops, |
5183 | [BTF_KIND_VAR] = &var_ops, |
5184 | [BTF_KIND_DATASEC] = &datasec_ops, |
5185 | [BTF_KIND_FLOAT] = &float_ops, |
5186 | [BTF_KIND_DECL_TAG] = &decl_tag_ops, |
5187 | [BTF_KIND_TYPE_TAG] = &modifier_ops, |
5188 | [BTF_KIND_ENUM64] = &enum64_ops, |
5189 | }; |
5190 | |
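/* Validate the fixed "struct btf_type" part of one entry, then dispatch
 * to the kind-specific check_meta(), which vets the trailing data and
 * returns its size. E.g. a BTF_KIND_ARRAY entry consumes
 * sizeof(struct btf_type) + sizeof(struct btf_array) bytes in total,
 * which is the value returned to the caller for advancing the cursor.
 */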
5191 | static s32 btf_check_meta(struct btf_verifier_env *env, |
5192 | const struct btf_type *t, |
5193 | u32 meta_left) |
5194 | { |
5195 | u32 saved_meta_left = meta_left; |
5196 | s32 var_meta_size; |
5197 | |
5198 | if (meta_left < sizeof(*t)) { |
		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
5200 | env->log_type_id, meta_left, sizeof(*t)); |
5201 | return -EINVAL; |
5202 | } |
5203 | meta_left -= sizeof(*t); |
5204 | |
5205 | if (t->info & ~BTF_INFO_MASK) { |
		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
5207 | env->log_type_id, t->info); |
5208 | return -EINVAL; |
5209 | } |
5210 | |
5211 | if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX || |
5212 | BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) { |
		btf_verifier_log(env, "[%u] Invalid kind:%u",
5214 | env->log_type_id, BTF_INFO_KIND(t->info)); |
5215 | return -EINVAL; |
5216 | } |
5217 | |
	if (!btf_name_offset_valid(env->btf, t->name_off)) {
		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
5220 | env->log_type_id, t->name_off); |
5221 | return -EINVAL; |
5222 | } |
5223 | |
5224 | var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left); |
5225 | if (var_meta_size < 0) |
5226 | return var_meta_size; |
5227 | |
5228 | meta_left -= var_meta_size; |
5229 | |
5230 | return saved_meta_left - meta_left; |
5231 | } |
5232 | |
5233 | static int btf_check_all_metas(struct btf_verifier_env *env) |
5234 | { |
5235 | struct btf *btf = env->btf; |
5236 | struct btf_header *hdr; |
5237 | void *cur, *end; |
5238 | |
5239 | hdr = &btf->hdr; |
5240 | cur = btf->nohdr_data + hdr->type_off; |
5241 | end = cur + hdr->type_len; |
5242 | |
5243 | env->log_type_id = btf->base_btf ? btf->start_id : 1; |
5244 | while (cur < end) { |
5245 | struct btf_type *t = cur; |
5246 | s32 meta_size; |
5247 | |
		meta_size = btf_check_meta(env, t, end - cur);
5249 | if (meta_size < 0) |
5250 | return meta_size; |
5251 | |
5252 | btf_add_type(env, t); |
5253 | cur += meta_size; |
5254 | env->log_type_id++; |
5255 | } |
5256 | |
5257 | return 0; |
5258 | } |
5259 | |
5260 | static bool btf_resolve_valid(struct btf_verifier_env *env, |
5261 | const struct btf_type *t, |
5262 | u32 type_id) |
5263 | { |
5264 | struct btf *btf = env->btf; |
5265 | |
5266 | if (!env_type_is_resolved(env, type_id)) |
5267 | return false; |
5268 | |
5269 | if (btf_type_is_struct(t) || btf_type_is_datasec(t)) |
5270 | return !btf_resolved_type_id(btf, type_id) && |
5271 | !btf_resolved_type_size(btf, type_id); |
5272 | |
5273 | if (btf_type_is_decl_tag(t) || btf_type_is_func(t)) |
5274 | return btf_resolved_type_id(btf, type_id) && |
5275 | !btf_resolved_type_size(btf, type_id); |
5276 | |
5277 | if (btf_type_is_modifier(t) || btf_type_is_ptr(t) || |
5278 | btf_type_is_var(t)) { |
		t = btf_type_id_resolve(btf, &type_id);
5280 | return t && |
5281 | !btf_type_is_modifier(t) && |
5282 | !btf_type_is_var(t) && |
5283 | !btf_type_is_datasec(t); |
5284 | } |
5285 | |
5286 | if (btf_type_is_array(t)) { |
5287 | const struct btf_array *array = btf_type_array(t); |
5288 | const struct btf_type *elem_type; |
5289 | u32 elem_type_id = array->type; |
5290 | u32 elem_size; |
5291 | |
		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
		return elem_type && !btf_type_is_modifier(elem_type) &&
5294 | (array->nelems * elem_size == |
5295 | btf_resolved_type_size(btf, type_id)); |
5296 | } |
5297 | |
5298 | return false; |
5299 | } |
5300 | |
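/* Resolve one type iteratively with an explicit stack instead of
 * recursion: push the start vertex, then run the kind-specific
 * resolve() on the top of the stack until it drains. A stack deeper
 * than MAX_RESOLVE_DEPTH surfaces as -E2BIG and a revisited vertex as
 * -EEXIST, which is reported below as "Loop detected".
 */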
5301 | static int btf_resolve(struct btf_verifier_env *env, |
5302 | const struct btf_type *t, u32 type_id) |
5303 | { |
5304 | u32 save_log_type_id = env->log_type_id; |
5305 | const struct resolve_vertex *v; |
5306 | int err = 0; |
5307 | |
5308 | env->resolve_mode = RESOLVE_TBD; |
5309 | env_stack_push(env, t, type_id); |
5310 | while (!err && (v = env_stack_peak(env))) { |
5311 | env->log_type_id = v->type_id; |
		err = btf_type_ops(v->t)->resolve(env, v);
5313 | } |
5314 | |
5315 | env->log_type_id = type_id; |
5316 | if (err == -E2BIG) { |
5317 | btf_verifier_log_type(env, t, |
5318 | "Exceeded max resolving depth:%u", |
5319 | MAX_RESOLVE_DEPTH); |
5320 | } else if (err == -EEXIST) { |
5321 | btf_verifier_log_type(env, t, "Loop detected"); |
5322 | } |
5323 | |
5324 | /* Final sanity check */ |
5325 | if (!err && !btf_resolve_valid(env, t, type_id)) { |
5326 | btf_verifier_log_type(env, t, "Invalid resolve state"); |
5327 | err = -EINVAL; |
5328 | } |
5329 | |
5330 | env->log_type_id = save_log_type_id; |
5331 | return err; |
5332 | } |
5333 | |
5334 | static int btf_check_all_types(struct btf_verifier_env *env) |
5335 | { |
5336 | struct btf *btf = env->btf; |
5337 | const struct btf_type *t; |
5338 | u32 type_id, i; |
5339 | int err; |
5340 | |
5341 | err = env_resolve_init(env); |
5342 | if (err) |
5343 | return err; |
5344 | |
5345 | env->phase++; |
5346 | for (i = btf->base_btf ? 0 : 1; i < btf->nr_types; i++) { |
5347 | type_id = btf->start_id + i; |
5348 | t = btf_type_by_id(btf, type_id); |
5349 | |
5350 | env->log_type_id = type_id; |
5351 | if (btf_type_needs_resolve(t) && |
5352 | !env_type_is_resolved(env, type_id)) { |
5353 | err = btf_resolve(env, t, type_id); |
5354 | if (err) |
5355 | return err; |
5356 | } |
5357 | |
5358 | if (btf_type_is_func_proto(t)) { |
5359 | err = btf_func_proto_check(env, t); |
5360 | if (err) |
5361 | return err; |
5362 | } |
5363 | } |
5364 | |
5365 | return 0; |
5366 | } |
5367 | |
5368 | static int btf_parse_type_sec(struct btf_verifier_env *env) |
5369 | { |
5370 | const struct btf_header *hdr = &env->btf->hdr; |
5371 | int err; |
5372 | |
5373 | /* Type section must align to 4 bytes */ |
5374 | if (hdr->type_off & (sizeof(u32) - 1)) { |
		btf_verifier_log(env, "Unaligned type_off");
5376 | return -EINVAL; |
5377 | } |
5378 | |
5379 | if (!env->btf->base_btf && !hdr->type_len) { |
		btf_verifier_log(env, "No type found");
5381 | return -EINVAL; |
5382 | } |
5383 | |
5384 | err = btf_check_all_metas(env); |
5385 | if (err) |
5386 | return err; |
5387 | |
5388 | return btf_check_all_types(env); |
5389 | } |
5390 | |
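/* The string section must be the last section in the BTF buffer, must
 * be NUL-terminated and, for base BTF, must start with '\0' so that
 * name_off 0 always denotes an anonymous name. E.g. in "\0foo\0bar\0",
 * "foo" lives at offset 1 and "bar" at offset 5.
 */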
5391 | static int btf_parse_str_sec(struct btf_verifier_env *env) |
5392 | { |
5393 | const struct btf_header *hdr; |
5394 | struct btf *btf = env->btf; |
5395 | const char *start, *end; |
5396 | |
5397 | hdr = &btf->hdr; |
5398 | start = btf->nohdr_data + hdr->str_off; |
5399 | end = start + hdr->str_len; |
5400 | |
5401 | if (end != btf->data + btf->data_size) { |
		btf_verifier_log(env, "String section is not at the end");
5403 | return -EINVAL; |
5404 | } |
5405 | |
5406 | btf->strings = start; |
5407 | |
5408 | if (btf->base_btf && !hdr->str_len) |
5409 | return 0; |
	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) {
		btf_verifier_log(env, "Invalid string section");
		return -EINVAL;
	}
	if (!btf->base_btf && start[0]) {
		btf_verifier_log(env, "Invalid string section");
5416 | return -EINVAL; |
5417 | } |
5418 | |
5419 | return 0; |
5420 | } |
5421 | |
5422 | static const size_t btf_sec_info_offset[] = { |
5423 | offsetof(struct btf_header, type_off), |
5424 | offsetof(struct btf_header, str_off), |
5425 | }; |
5426 | |
5427 | static int btf_sec_info_cmp(const void *a, const void *b) |
5428 | { |
5429 | const struct btf_sec_info *x = a; |
5430 | const struct btf_sec_info *y = b; |
5431 | |
5432 | return (int)(x->off - y->off) ? : (int)(x->len - y->len); |
5433 | } |
5434 | |
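/* The type and string sections must tile the area after the header
 * exactly: sorted by offset with no gaps, no overlap and no trailing
 * bytes. E.g. with hdr_len = 24 and a 100-byte type section, the only
 * valid layout is type_off = 0, str_off = 100 and
 * str_len = data_size - 24 - 100.
 */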
5435 | static int btf_check_sec_info(struct btf_verifier_env *env, |
5436 | u32 btf_data_size) |
5437 | { |
5438 | struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)]; |
5439 | u32 total, expected_total, i; |
5440 | const struct btf_header *hdr; |
5441 | const struct btf *btf; |
5442 | |
5443 | btf = env->btf; |
5444 | hdr = &btf->hdr; |
5445 | |
5446 | /* Populate the secs from hdr */ |
5447 | for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) |
5448 | secs[i] = *(struct btf_sec_info *)((void *)hdr + |
5449 | btf_sec_info_offset[i]); |
5450 | |
	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
5453 | |
5454 | /* Check for gaps and overlap among sections */ |
5455 | total = 0; |
5456 | expected_total = btf_data_size - hdr->hdr_len; |
5457 | for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) { |
		if (expected_total < secs[i].off) {
			btf_verifier_log(env, "Invalid section offset");
			return -EINVAL;
		}
		if (total < secs[i].off) {
			/* gap */
			btf_verifier_log(env, "Unsupported section found");
			return -EINVAL;
		}
		if (total > secs[i].off) {
			btf_verifier_log(env, "Section overlap found");
			return -EINVAL;
		}
		if (expected_total - total < secs[i].len) {
			btf_verifier_log(env,
					 "Total section length too long");
5474 | return -EINVAL; |
5475 | } |
5476 | total += secs[i].len; |
5477 | } |
5478 | |
5479 | /* There is data other than hdr and known sections */ |
5480 | if (expected_total != total) { |
		btf_verifier_log(env, "Unsupported section found");
5482 | return -EINVAL; |
5483 | } |
5484 | |
5485 | return 0; |
5486 | } |
5487 | |
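/* Parse and sanity-check the header. hdr_len comes from the payload
 * itself, so a newer userspace may supply a header larger than the
 * kernel's struct btf_header; that is tolerated only if every byte
 * beyond the kernel's definition is zero, and only the known prefix is
 * copied into btf->hdr.
 */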
5488 | static int btf_parse_hdr(struct btf_verifier_env *env) |
5489 | { |
5490 | u32 hdr_len, hdr_copy, btf_data_size; |
5491 | const struct btf_header *hdr; |
5492 | struct btf *btf; |
5493 | |
5494 | btf = env->btf; |
5495 | btf_data_size = btf->data_size; |
5496 | |
5497 | if (btf_data_size < offsetofend(struct btf_header, hdr_len)) { |
		btf_verifier_log(env, "hdr_len not found");
5499 | return -EINVAL; |
5500 | } |
5501 | |
5502 | hdr = btf->data; |
5503 | hdr_len = hdr->hdr_len; |
5504 | if (btf_data_size < hdr_len) { |
		btf_verifier_log(env, "btf_header not found");
5506 | return -EINVAL; |
5507 | } |
5508 | |
5509 | /* Ensure the unsupported header fields are zero */ |
5510 | if (hdr_len > sizeof(btf->hdr)) { |
5511 | u8 *expected_zero = btf->data + sizeof(btf->hdr); |
5512 | u8 *end = btf->data + hdr_len; |
5513 | |
5514 | for (; expected_zero < end; expected_zero++) { |
5515 | if (*expected_zero) { |
				btf_verifier_log(env, "Unsupported btf_header");
5517 | return -E2BIG; |
5518 | } |
5519 | } |
5520 | } |
5521 | |
5522 | hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr)); |
5523 | memcpy(&btf->hdr, btf->data, hdr_copy); |
5524 | |
5525 | hdr = &btf->hdr; |
5526 | |
5527 | btf_verifier_log_hdr(env, btf_data_size); |
5528 | |
5529 | if (hdr->magic != BTF_MAGIC) { |
		btf_verifier_log(env, "Invalid magic");
5531 | return -EINVAL; |
5532 | } |
5533 | |
5534 | if (hdr->version != BTF_VERSION) { |
		btf_verifier_log(env, "Unsupported version");
5536 | return -ENOTSUPP; |
5537 | } |
5538 | |
5539 | if (hdr->flags) { |
		btf_verifier_log(env, "Unsupported flags");
5541 | return -ENOTSUPP; |
5542 | } |
5543 | |
5544 | if (!btf->base_btf && btf_data_size == hdr->hdr_len) { |
		btf_verifier_log(env, "No data");
5546 | return -EINVAL; |
5547 | } |
5548 | |
5549 | return btf_check_sec_info(env, btf_data_size); |
5550 | } |
5551 | |
5552 | static const char *alloc_obj_fields[] = { |
5553 | "bpf_spin_lock", |
5554 | "bpf_list_head", |
5555 | "bpf_list_node", |
5556 | "bpf_rb_root", |
5557 | "bpf_rb_node", |
5558 | "bpf_refcount", |
5559 | }; |
5560 | |
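/* Scan user BTF for structs that embed special BPF objects (the
 * alloc_obj_fields above, e.g. bpf_spin_lock or bpf_rb_root) or kptrs,
 * and record a btf_struct_meta with the parsed field records for each
 * such struct. btf_find_struct_meta() below looks the table up by
 * btf_id via bsearch.
 */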
5561 | static struct btf_struct_metas * |
5562 | btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf) |
5563 | { |
5564 | struct btf_struct_metas *tab = NULL; |
5565 | struct btf_id_set *aof; |
5566 | int i, n, id, ret; |
5567 | |
5568 | BUILD_BUG_ON(offsetof(struct btf_id_set, cnt) != 0); |
5569 | BUILD_BUG_ON(sizeof(struct btf_id_set) != sizeof(u32)); |
5570 | |
5571 | aof = kmalloc(sizeof(*aof), GFP_KERNEL | __GFP_NOWARN); |
5572 | if (!aof) |
		return ERR_PTR(-ENOMEM);
5574 | aof->cnt = 0; |
5575 | |
5576 | for (i = 0; i < ARRAY_SIZE(alloc_obj_fields); i++) { |
5577 | /* Try to find whether this special type exists in user BTF, and |
5578 | * if so remember its ID so we can easily find it among members |
5579 | * of structs that we iterate in the next loop. |
5580 | */ |
5581 | struct btf_id_set *new_aof; |
5582 | |
		id = btf_find_by_name_kind(btf, alloc_obj_fields[i], BTF_KIND_STRUCT);
5584 | if (id < 0) |
5585 | continue; |
5586 | |
5587 | new_aof = krealloc(aof, struct_size(new_aof, ids, aof->cnt + 1), |
5588 | GFP_KERNEL | __GFP_NOWARN); |
5589 | if (!new_aof) { |
5590 | ret = -ENOMEM; |
5591 | goto free_aof; |
5592 | } |
5593 | aof = new_aof; |
5594 | aof->ids[aof->cnt++] = id; |
5595 | } |
5596 | |
5597 | n = btf_nr_types(btf); |
5598 | for (i = 1; i < n; i++) { |
5599 | /* Try to find if there are kptrs in user BTF and remember their ID */ |
5600 | struct btf_id_set *new_aof; |
5601 | struct btf_field_info tmp; |
5602 | const struct btf_type *t; |
5603 | |
5604 | t = btf_type_by_id(btf, i); |
5605 | if (!t) { |
5606 | ret = -EINVAL; |
5607 | goto free_aof; |
5608 | } |
5609 | |
		ret = btf_find_kptr(btf, t, 0, 0, &tmp, BPF_KPTR);
5611 | if (ret != BTF_FIELD_FOUND) |
5612 | continue; |
5613 | |
5614 | new_aof = krealloc(aof, struct_size(new_aof, ids, aof->cnt + 1), |
5615 | GFP_KERNEL | __GFP_NOWARN); |
5616 | if (!new_aof) { |
5617 | ret = -ENOMEM; |
5618 | goto free_aof; |
5619 | } |
5620 | aof = new_aof; |
5621 | aof->ids[aof->cnt++] = i; |
5622 | } |
5623 | |
5624 | if (!aof->cnt) { |
		kfree(aof);
5626 | return NULL; |
5627 | } |
	sort(&aof->ids, aof->cnt, sizeof(aof->ids[0]), btf_id_cmp_func, NULL);
5629 | |
5630 | for (i = 1; i < n; i++) { |
5631 | struct btf_struct_metas *new_tab; |
5632 | const struct btf_member *member; |
5633 | struct btf_struct_meta *type; |
5634 | struct btf_record *record; |
5635 | const struct btf_type *t; |
5636 | int j, tab_cnt; |
5637 | |
5638 | t = btf_type_by_id(btf, i); |
5639 | if (!__btf_type_is_struct(t)) |
5640 | continue; |
5641 | |
5642 | cond_resched(); |
5643 | |
5644 | for_each_member(j, t, member) { |
			if (btf_id_set_contains(aof, member->type))
5646 | goto parse; |
5647 | } |
5648 | continue; |
5649 | parse: |
5650 | tab_cnt = tab ? tab->cnt : 0; |
5651 | new_tab = krealloc(tab, struct_size(new_tab, types, tab_cnt + 1), |
5652 | GFP_KERNEL | __GFP_NOWARN); |
5653 | if (!new_tab) { |
5654 | ret = -ENOMEM; |
5655 | goto free; |
5656 | } |
5657 | if (!tab) |
5658 | new_tab->cnt = 0; |
5659 | tab = new_tab; |
5660 | |
5661 | type = &tab->types[tab->cnt]; |
5662 | type->btf_id = i; |
		record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_RES_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE |
					  BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT |
					  BPF_KPTR, t->size);
		/* The record cannot be unset, treat it as an error if so */
		if (IS_ERR_OR_NULL(record)) {
			ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT;
5669 | goto free; |
5670 | } |
5671 | type->record = record; |
5672 | tab->cnt++; |
5673 | } |
	kfree(aof);
5675 | return tab; |
5676 | free: |
5677 | btf_struct_metas_free(tab); |
5678 | free_aof: |
	kfree(aof);
	return ERR_PTR(ret);
5681 | } |
5682 | |
5683 | struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id) |
5684 | { |
5685 | struct btf_struct_metas *tab; |
5686 | |
5687 | BUILD_BUG_ON(offsetof(struct btf_struct_meta, btf_id) != 0); |
5688 | tab = btf->struct_meta_tab; |
5689 | if (!tab) |
5690 | return NULL; |
	return bsearch(&btf_id, tab->types, tab->cnt, sizeof(tab->types[0]), btf_id_cmp_func);
5692 | } |
5693 | |
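/* Within a chain of modifiers, all type tags must come first:
 * "type_tag -> const -> int" is accepted, while
 * "const -> type_tag -> int" is rejected. Chains longer than 32 links,
 * including reference cycles, fail with -ELOOP.
 */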
5694 | static int btf_check_type_tags(struct btf_verifier_env *env, |
5695 | struct btf *btf, int start_id) |
5696 | { |
5697 | int i, n, good_id = start_id - 1; |
5698 | bool in_tags; |
5699 | |
5700 | n = btf_nr_types(btf); |
5701 | for (i = start_id; i < n; i++) { |
5702 | const struct btf_type *t; |
5703 | int chain_limit = 32; |
5704 | u32 cur_id = i; |
5705 | |
5706 | t = btf_type_by_id(btf, i); |
5707 | if (!t) |
5708 | return -EINVAL; |
5709 | if (!btf_type_is_modifier(t)) |
5710 | continue; |
5711 | |
5712 | cond_resched(); |
5713 | |
5714 | in_tags = btf_type_is_type_tag(t); |
5715 | while (btf_type_is_modifier(t)) { |
5716 | if (!chain_limit--) { |
				btf_verifier_log(env, "Max chain length or cycle detected");
5718 | return -ELOOP; |
5719 | } |
5720 | if (btf_type_is_type_tag(t)) { |
5721 | if (!in_tags) { |
					btf_verifier_log(env, "Type tags don't precede modifiers");
5723 | return -EINVAL; |
5724 | } |
5725 | } else if (in_tags) { |
5726 | in_tags = false; |
5727 | } |
5728 | if (cur_id <= good_id) |
5729 | break; |
5730 | /* Move to next type */ |
5731 | cur_id = t->type; |
5732 | t = btf_type_by_id(btf, cur_id); |
5733 | if (!t) |
5734 | return -EINVAL; |
5735 | } |
5736 | good_id = i; |
5737 | } |
5738 | return 0; |
5739 | } |
5740 | |
5741 | static int finalize_log(struct bpf_verifier_log *log, bpfptr_t uattr, u32 uattr_size) |
5742 | { |
5743 | u32 log_true_size; |
5744 | int err; |
5745 | |
	err = bpf_vlog_finalize(log, &log_true_size);
5747 | |
5748 | if (uattr_size >= offsetofend(union bpf_attr, btf_log_true_size) && |
	    copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, btf_log_true_size),
				  &log_true_size, sizeof(log_true_size)))
5751 | err = -EFAULT; |
5752 | |
5753 | return err; |
5754 | } |
5755 | |
5756 | static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) |
5757 | { |
	bpfptr_t btf_data = make_bpfptr(attr->btf, uattr.is_kernel);
5759 | char __user *log_ubuf = u64_to_user_ptr(attr->btf_log_buf); |
5760 | struct btf_struct_metas *struct_meta_tab; |
5761 | struct btf_verifier_env *env = NULL; |
5762 | struct btf *btf = NULL; |
5763 | u8 *data; |
5764 | int err, ret; |
5765 | |
5766 | if (attr->btf_size > BTF_MAX_SIZE) |
		return ERR_PTR(-E2BIG);
5768 | |
5769 | env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); |
5770 | if (!env) |
		return ERR_PTR(-ENOMEM);
5772 | |
5773 | /* user could have requested verbose verifier output |
5774 | * and supplied buffer to store the verification trace |
5775 | */ |
	err = bpf_vlog_init(&env->log, attr->btf_log_level,
			    log_ubuf, attr->btf_log_size);
5778 | if (err) |
5779 | goto errout_free; |
5780 | |
5781 | btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); |
5782 | if (!btf) { |
5783 | err = -ENOMEM; |
5784 | goto errout; |
5785 | } |
5786 | env->btf = btf; |
5787 | |
5788 | data = kvmalloc(attr->btf_size, GFP_KERNEL | __GFP_NOWARN); |
5789 | if (!data) { |
5790 | err = -ENOMEM; |
5791 | goto errout; |
5792 | } |
5793 | |
5794 | btf->data = data; |
5795 | btf->data_size = attr->btf_size; |
5796 | |
	if (copy_from_bpfptr(data, btf_data, attr->btf_size)) {
5798 | err = -EFAULT; |
5799 | goto errout; |
5800 | } |
5801 | |
5802 | err = btf_parse_hdr(env); |
5803 | if (err) |
5804 | goto errout; |
5805 | |
5806 | btf->nohdr_data = btf->data + btf->hdr.hdr_len; |
5807 | |
5808 | err = btf_parse_str_sec(env); |
5809 | if (err) |
5810 | goto errout; |
5811 | |
5812 | err = btf_parse_type_sec(env); |
5813 | if (err) |
5814 | goto errout; |
5815 | |
	err = btf_check_type_tags(env, btf, 1);
5817 | if (err) |
5818 | goto errout; |
5819 | |
	struct_meta_tab = btf_parse_struct_metas(&env->log, btf);
	if (IS_ERR(struct_meta_tab)) {
		err = PTR_ERR(struct_meta_tab);
5823 | goto errout; |
5824 | } |
5825 | btf->struct_meta_tab = struct_meta_tab; |
5826 | |
5827 | if (struct_meta_tab) { |
5828 | int i; |
5829 | |
5830 | for (i = 0; i < struct_meta_tab->cnt; i++) { |
			err = btf_check_and_fixup_fields(btf, struct_meta_tab->types[i].record);
5832 | if (err < 0) |
5833 | goto errout_meta; |
5834 | } |
5835 | } |
5836 | |
	err = finalize_log(&env->log, uattr, uattr_size);
5838 | if (err) |
5839 | goto errout_free; |
5840 | |
5841 | btf_verifier_env_free(env); |
	refcount_set(&btf->refcnt, 1);
5843 | return btf; |
5844 | |
5845 | errout_meta: |
5846 | btf_free_struct_meta_tab(btf); |
5847 | errout: |
5848 | /* overwrite err with -ENOSPC or -EFAULT */ |
	ret = finalize_log(&env->log, uattr, uattr_size);
5850 | if (ret) |
5851 | err = ret; |
5852 | errout_free: |
5853 | btf_verifier_env_free(env); |
5854 | if (btf) |
5855 | btf_free(btf); |
	return ERR_PTR(err);
5857 | } |
5858 | |
5859 | extern char __start_BTF[]; |
5860 | extern char __stop_BTF[]; |
5861 | extern struct btf *btf_vmlinux; |
5862 | |
5863 | #define BPF_MAP_TYPE(_id, _ops) |
5864 | #define BPF_LINK_TYPE(_id, _name) |
5865 | static union { |
5866 | struct bpf_ctx_convert { |
5867 | #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ |
5868 | prog_ctx_type _id##_prog; \ |
5869 | kern_ctx_type _id##_kern; |
5870 | #include <linux/bpf_types.h> |
5871 | #undef BPF_PROG_TYPE |
5872 | } *__t; |
5873 | /* 't' is written once under lock. Read many times. */ |
5874 | const struct btf_type *t; |
5875 | } bpf_ctx_convert; |
5876 | enum { |
5877 | #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ |
5878 | __ctx_convert##_id, |
5879 | #include <linux/bpf_types.h> |
5880 | #undef BPF_PROG_TYPE |
5881 | __ctx_convert_unused, /* to avoid empty enum in extreme .config */ |
5882 | }; |
5883 | static u8 bpf_ctx_convert_map[] = { |
5884 | #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ |
5885 | [_id] = __ctx_convert##_id, |
5886 | #include <linux/bpf_types.h> |
5887 | #undef BPF_PROG_TYPE |
5888 | 0, /* avoid empty array */ |
5889 | }; |
5890 | #undef BPF_MAP_TYPE |
5891 | #undef BPF_LINK_TYPE |
5892 | |
5893 | static const struct btf_type *find_canonical_prog_ctx_type(enum bpf_prog_type prog_type) |
5894 | { |
5895 | const struct btf_type *conv_struct; |
5896 | const struct btf_member *ctx_type; |
5897 | |
5898 | conv_struct = bpf_ctx_convert.t; |
5899 | if (!conv_struct) |
5900 | return NULL; |
5901 | /* prog_type is valid bpf program type. No need for bounds check. */ |
	ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
5903 | /* ctx_type is a pointer to prog_ctx_type in vmlinux. |
5904 | * Like 'struct __sk_buff' |
5905 | */ |
5906 | return btf_type_by_id(btf_vmlinux, ctx_type->type); |
5907 | } |
5908 | |
5909 | static int find_kern_ctx_type_id(enum bpf_prog_type prog_type) |
5910 | { |
5911 | const struct btf_type *conv_struct; |
5912 | const struct btf_member *ctx_type; |
5913 | |
5914 | conv_struct = bpf_ctx_convert.t; |
5915 | if (!conv_struct) |
5916 | return -EFAULT; |
5917 | /* prog_type is valid bpf program type. No need for bounds check. */ |
	ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2 + 1;
	/* ctx_type is a pointer to kern_ctx_type in vmlinux,
	 * e.g. 'struct sk_buff'
	 */
5922 | return ctx_type->type; |
5923 | } |
5924 | |
5925 | bool btf_is_projection_of(const char *pname, const char *tname) |
5926 | { |
5927 | if (strcmp(pname, "__sk_buff") == 0 && strcmp(tname, "sk_buff") == 0) |
5928 | return true; |
5929 | if (strcmp(pname, "xdp_md") == 0 && strcmp(tname, "xdp_buff") == 0) |
5930 | return true; |
5931 | return false; |
5932 | } |
5933 | |
5934 | bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf, |
5935 | const struct btf_type *t, enum bpf_prog_type prog_type, |
5936 | int arg) |
5937 | { |
5938 | const struct btf_type *ctx_type; |
5939 | const char *tname, *ctx_tname; |
5940 | |
5941 | t = btf_type_by_id(btf, t->type); |
5942 | |
5943 | /* KPROBE programs allow bpf_user_pt_regs_t typedef, which we need to |
5944 | * check before we skip all the typedef below. |
5945 | */ |
5946 | if (prog_type == BPF_PROG_TYPE_KPROBE) { |
5947 | while (btf_type_is_modifier(t) && !btf_type_is_typedef(t)) |
5948 | t = btf_type_by_id(btf, t->type); |
5949 | |
5950 | if (btf_type_is_typedef(t)) { |
			tname = btf_name_by_offset(btf, t->name_off);
5952 | if (tname && strcmp(tname, "bpf_user_pt_regs_t") == 0) |
5953 | return true; |
5954 | } |
5955 | } |
5956 | |
5957 | while (btf_type_is_modifier(t)) |
5958 | t = btf_type_by_id(btf, t->type); |
5959 | if (!btf_type_is_struct(t)) { |
5960 | /* Only pointer to struct is supported for now. |
5961 | * That means that BPF_PROG_TYPE_TRACEPOINT with BTF |
5962 | * is not supported yet. |
5963 | * BPF_PROG_TYPE_RAW_TRACEPOINT is fine. |
5964 | */ |
5965 | return false; |
5966 | } |
	tname = btf_name_by_offset(btf, t->name_off);
	if (!tname) {
		bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
5970 | return false; |
5971 | } |
5972 | |
5973 | ctx_type = find_canonical_prog_ctx_type(prog_type); |
5974 | if (!ctx_type) { |
		bpf_log(log, "btf_vmlinux is malformed\n");
5976 | /* should not happen */ |
5977 | return false; |
5978 | } |
5979 | again: |
	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
	if (!ctx_tname) {
		/* should not happen */
		bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
5984 | return false; |
5985 | } |
5986 | /* program types without named context types work only with arg:ctx tag */ |
5987 | if (ctx_tname[0] == '\0') |
5988 | return false; |
5989 | /* only compare that prog's ctx type name is the same as |
5990 | * kernel expects. No need to compare field by field. |
5991 | * It's ok for bpf prog to do: |
5992 | * struct __sk_buff {}; |
5993 | * int socket_filter_bpf_prog(struct __sk_buff *skb) |
5994 | * { // no fields of skb are ever used } |
5995 | */ |
	if (btf_is_projection_of(ctx_tname, tname))
5997 | return true; |
5998 | if (strcmp(ctx_tname, tname)) { |
5999 | /* bpf_user_pt_regs_t is a typedef, so resolve it to |
6000 | * underlying struct and check name again |
6001 | */ |
		if (!btf_type_is_modifier(ctx_type))
			return false;
		while (btf_type_is_modifier(ctx_type))
6005 | ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type); |
6006 | goto again; |
6007 | } |
6008 | return true; |
6009 | } |
6010 | |
6011 | /* forward declarations for arch-specific underlying types of |
6012 | * bpf_user_pt_regs_t; this avoids the need for arch-specific #ifdef |
6013 | * compilation guards below for BPF_PROG_TYPE_PERF_EVENT checks, but still |
6014 | * works correctly with __builtin_types_compatible_p() on respective |
6015 | * architectures |
6016 | */ |
6017 | struct user_regs_struct; |
6018 | struct user_pt_regs; |
6019 | |
6020 | static int btf_validate_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf, |
6021 | const struct btf_type *t, int arg, |
6022 | enum bpf_prog_type prog_type, |
6023 | enum bpf_attach_type attach_type) |
6024 | { |
6025 | const struct btf_type *ctx_type; |
6026 | const char *tname, *ctx_tname; |
6027 | |
6028 | if (!btf_is_ptr(t)) { |
		bpf_log(log, "arg#%d type isn't a pointer\n", arg);
6030 | return -EINVAL; |
6031 | } |
6032 | t = btf_type_by_id(btf, t->type); |
6033 | |
6034 | /* KPROBE and PERF_EVENT programs allow bpf_user_pt_regs_t typedef */ |
6035 | if (prog_type == BPF_PROG_TYPE_KPROBE || prog_type == BPF_PROG_TYPE_PERF_EVENT) { |
6036 | while (btf_type_is_modifier(t) && !btf_type_is_typedef(t)) |
6037 | t = btf_type_by_id(btf, t->type); |
6038 | |
6039 | if (btf_type_is_typedef(t)) { |
			tname = btf_name_by_offset(btf, t->name_off);
6041 | if (tname && strcmp(tname, "bpf_user_pt_regs_t") == 0) |
6042 | return 0; |
6043 | } |
6044 | } |
6045 | |
6046 | /* all other program types don't use typedefs for context type */ |
6047 | while (btf_type_is_modifier(t)) |
6048 | t = btf_type_by_id(btf, t->type); |
6049 | |
6050 | /* `void *ctx __arg_ctx` is always valid */ |
6051 | if (btf_type_is_void(t)) |
6052 | return 0; |
6053 | |
6054 | tname = btf_name_by_offset(btf, offset: t->name_off); |
	if (str_is_empty(tname)) {
		bpf_log(log, "arg#%d type doesn't have a name\n", arg);
6057 | return -EINVAL; |
6058 | } |
6059 | |
6060 | /* special cases */ |
6061 | switch (prog_type) { |
6062 | case BPF_PROG_TYPE_KPROBE: |
6063 | if (__btf_type_is_struct(t) && strcmp(tname, "pt_regs") == 0) |
6064 | return 0; |
6065 | break; |
6066 | case BPF_PROG_TYPE_PERF_EVENT: |
6067 | if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) && |
6068 | __btf_type_is_struct(t) && strcmp(tname, "pt_regs") == 0) |
6069 | return 0; |
6070 | if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) && |
6071 | __btf_type_is_struct(t) && strcmp(tname, "user_pt_regs") == 0) |
6072 | return 0; |
6073 | if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) && |
6074 | __btf_type_is_struct(t) && strcmp(tname, "user_regs_struct") == 0) |
6075 | return 0; |
6076 | break; |
6077 | case BPF_PROG_TYPE_RAW_TRACEPOINT: |
6078 | case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: |
6079 | /* allow u64* as ctx */ |
6080 | if (btf_is_int(t) && t->size == 8) |
6081 | return 0; |
6082 | break; |
6083 | case BPF_PROG_TYPE_TRACING: |
6084 | switch (attach_type) { |
6085 | case BPF_TRACE_RAW_TP: |
6086 | /* tp_btf program is TRACING, so need special case here */ |
6087 | if (__btf_type_is_struct(t) && |
6088 | strcmp(tname, "bpf_raw_tracepoint_args") == 0) |
6089 | return 0; |
6090 | /* allow u64* as ctx */ |
6091 | if (btf_is_int(t) && t->size == 8) |
6092 | return 0; |
6093 | break; |
6094 | case BPF_TRACE_ITER: |
6095 | /* allow struct bpf_iter__xxx types only */ |
6096 | if (__btf_type_is_struct(t) && |
		    strncmp(tname, "bpf_iter__", sizeof("bpf_iter__") - 1) == 0)
6098 | return 0; |
6099 | break; |
6100 | case BPF_TRACE_FENTRY: |
6101 | case BPF_TRACE_FEXIT: |
6102 | case BPF_MODIFY_RETURN: |
6103 | /* allow u64* as ctx */ |
6104 | if (btf_is_int(t) && t->size == 8) |
6105 | return 0; |
6106 | break; |
6107 | default: |
6108 | break; |
6109 | } |
6110 | break; |
6111 | case BPF_PROG_TYPE_LSM: |
6112 | case BPF_PROG_TYPE_STRUCT_OPS: |
6113 | /* allow u64* as ctx */ |
6114 | if (btf_is_int(t) && t->size == 8) |
6115 | return 0; |
6116 | break; |
6117 | case BPF_PROG_TYPE_TRACEPOINT: |
6118 | case BPF_PROG_TYPE_SYSCALL: |
6119 | case BPF_PROG_TYPE_EXT: |
6120 | return 0; /* anything goes */ |
6121 | default: |
6122 | break; |
6123 | } |
6124 | |
6125 | ctx_type = find_canonical_prog_ctx_type(prog_type); |
6126 | if (!ctx_type) { |
6127 | /* should not happen */ |
		bpf_log(log, "btf_vmlinux is malformed\n");
6129 | return -EINVAL; |
6130 | } |
6131 | |
6132 | /* resolve typedefs and check that underlying structs are matching as well */ |
	while (btf_type_is_modifier(ctx_type))
6134 | ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type); |
6135 | |
6136 | /* if program type doesn't have distinctly named struct type for |
6137 | * context, then __arg_ctx argument can only be `void *`, which we |
6138 | * already checked above |
6139 | */ |
	if (!__btf_type_is_struct(ctx_type)) {
		bpf_log(log, "arg#%d should be void pointer\n", arg);
6142 | return -EINVAL; |
6143 | } |
6144 | |
	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
	if (!__btf_type_is_struct(t) || strcmp(ctx_tname, tname) != 0) {
		bpf_log(log, "arg#%d should be `struct %s *`\n", arg, ctx_tname);
6148 | return -EINVAL; |
6149 | } |
6150 | |
6151 | return 0; |
6152 | } |
6153 | |
6154 | static int btf_translate_to_vmlinux(struct bpf_verifier_log *log, |
6155 | struct btf *btf, |
6156 | const struct btf_type *t, |
6157 | enum bpf_prog_type prog_type, |
6158 | int arg) |
6159 | { |
6160 | if (!btf_is_prog_ctx_type(log, btf, t, prog_type, arg)) |
6161 | return -ENOENT; |
6162 | return find_kern_ctx_type_id(prog_type); |
6163 | } |
6164 | |
6165 | int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type) |
6166 | { |
6167 | const struct btf_member *kctx_member; |
6168 | const struct btf_type *conv_struct; |
6169 | const struct btf_type *kctx_type; |
6170 | u32 kctx_type_id; |
6171 | |
6172 | conv_struct = bpf_ctx_convert.t; |
6173 | /* get member for kernel ctx type */ |
6174 | kctx_member = btf_type_member(t: conv_struct) + bpf_ctx_convert_map[prog_type] * 2 + 1; |
6175 | kctx_type_id = kctx_member->type; |
6176 | kctx_type = btf_type_by_id(btf_vmlinux, kctx_type_id); |
6177 | if (!btf_type_is_struct(t: kctx_type)) { |
		bpf_log(log, "kern ctx type id %u is not a struct\n", kctx_type_id);
6179 | return -EINVAL; |
6180 | } |
6181 | |
6182 | return kctx_type_id; |
6183 | } |
6184 | |
6185 | BTF_ID_LIST(bpf_ctx_convert_btf_id) |
6186 | BTF_ID(struct, bpf_ctx_convert) |
6187 | |
6188 | static struct btf *btf_parse_base(struct btf_verifier_env *env, const char *name, |
6189 | void *data, unsigned int data_size) |
6190 | { |
6191 | struct btf *btf = NULL; |
6192 | int err; |
6193 | |
6194 | if (!IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) |
		return ERR_PTR(-ENOENT);
6196 | |
6197 | btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); |
6198 | if (!btf) { |
6199 | err = -ENOMEM; |
6200 | goto errout; |
6201 | } |
6202 | env->btf = btf; |
6203 | |
6204 | btf->data = data; |
6205 | btf->data_size = data_size; |
6206 | btf->kernel_btf = true; |
	snprintf(btf->name, sizeof(btf->name), "%s", name);
6208 | |
6209 | err = btf_parse_hdr(env); |
6210 | if (err) |
6211 | goto errout; |
6212 | |
6213 | btf->nohdr_data = btf->data + btf->hdr.hdr_len; |
6214 | |
6215 | err = btf_parse_str_sec(env); |
6216 | if (err) |
6217 | goto errout; |
6218 | |
6219 | err = btf_check_all_metas(env); |
6220 | if (err) |
6221 | goto errout; |
6222 | |
	err = btf_check_type_tags(env, btf, 1);
6224 | if (err) |
6225 | goto errout; |
6226 | |
	refcount_set(&btf->refcnt, 1);
6228 | |
6229 | return btf; |
6230 | |
6231 | errout: |
6232 | if (btf) { |
		kvfree(btf->types);
		kfree(btf);
	}
	return ERR_PTR(err);
6237 | } |
6238 | |
6239 | struct btf *btf_parse_vmlinux(void) |
6240 | { |
6241 | struct btf_verifier_env *env = NULL; |
6242 | struct bpf_verifier_log *log; |
6243 | struct btf *btf; |
6244 | int err; |
6245 | |
6246 | env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); |
6247 | if (!env) |
		return ERR_PTR(-ENOMEM);
6249 | |
6250 | log = &env->log; |
6251 | log->level = BPF_LOG_KERNEL; |
	btf = btf_parse_base(env, "vmlinux", __start_BTF, __stop_BTF - __start_BTF);
	if (IS_ERR(btf))
6254 | goto err_out; |
6255 | |
6256 | /* btf_parse_vmlinux() runs under bpf_verifier_lock */ |
6257 | bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); |
6258 | err = btf_alloc_id(btf); |
6259 | if (err) { |
6260 | btf_free(btf); |
		btf = ERR_PTR(err);
6262 | } |
6263 | err_out: |
6264 | btf_verifier_env_free(env); |
6265 | return btf; |
6266 | } |
6267 | |
6268 | /* If .BTF_ids section was created with distilled base BTF, both base and |
6269 | * split BTF ids will need to be mapped to actual base/split ids for |
6270 | * BTF now that it has been relocated. |
6271 | */ |
6272 | static __u32 btf_relocate_id(const struct btf *btf, __u32 id) |
6273 | { |
6274 | if (!btf->base_btf || !btf->base_id_map) |
6275 | return id; |
6276 | return btf->base_id_map[id]; |
6277 | } |
6278 | |
6279 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
6280 | |
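/* Parse a module's .BTF section as split BTF on top of a base. The base
 * is normally vmlinux BTF; if the module carries a distilled .BTF.base
 * section, that is parsed first and the IDs are afterwards relocated
 * onto the live vmlinux BTF (see btf_relocate_id() above).
 */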
6281 | static struct btf *btf_parse_module(const char *module_name, const void *data, |
6282 | unsigned int data_size, void *base_data, |
6283 | unsigned int base_data_size) |
6284 | { |
6285 | struct btf *btf = NULL, *vmlinux_btf, *base_btf = NULL; |
6286 | struct btf_verifier_env *env = NULL; |
6287 | struct bpf_verifier_log *log; |
6288 | int err = 0; |
6289 | |
6290 | vmlinux_btf = bpf_get_btf_vmlinux(); |
6291 | if (IS_ERR(vmlinux_btf)) |
6292 | return vmlinux_btf; |
6293 | if (!vmlinux_btf) |
6294 | return ERR_PTR(-EINVAL); |
6295 | |
6296 | env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); |
6297 | if (!env) |
6298 | return ERR_PTR(-ENOMEM); |
6299 | |
6300 | log = &env->log; |
6301 | log->level = BPF_LOG_KERNEL; |
6302 | |
6303 | if (base_data) { |
6304 | base_btf = btf_parse_base(env, ".BTF.base", base_data, base_data_size); |
6305 | if (IS_ERR(base_btf)) { |
6306 | err = PTR_ERR(base_btf); |
6307 | goto errout; |
6308 | } |
6309 | } else { |
6310 | base_btf = vmlinux_btf; |
6311 | } |
6312 | |
6313 | btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); |
6314 | if (!btf) { |
6315 | err = -ENOMEM; |
6316 | goto errout; |
6317 | } |
6318 | env->btf = btf; |
6319 | |
6320 | btf->base_btf = base_btf; |
6321 | btf->start_id = base_btf->nr_types; |
6322 | btf->start_str_off = base_btf->hdr.str_len; |
6323 | btf->kernel_btf = true; |
6324 | snprintf(btf->name, sizeof(btf->name), "%s", module_name); |
6325 | |
6326 | btf->data = kvmemdup(data, data_size, GFP_KERNEL | __GFP_NOWARN); |
6327 | if (!btf->data) { |
6328 | err = -ENOMEM; |
6329 | goto errout; |
6330 | } |
6331 | btf->data_size = data_size; |
6332 | |
6333 | err = btf_parse_hdr(env); |
6334 | if (err) |
6335 | goto errout; |
6336 | |
6337 | btf->nohdr_data = btf->data + btf->hdr.hdr_len; |
6338 | |
6339 | err = btf_parse_str_sec(env); |
6340 | if (err) |
6341 | goto errout; |
6342 | |
6343 | err = btf_check_all_metas(env); |
6344 | if (err) |
6345 | goto errout; |
6346 | |
6347 | err = btf_check_type_tags(env, btf, btf_nr_types(base_btf)); |
6348 | if (err) |
6349 | goto errout; |
6350 | |
6351 | if (base_btf != vmlinux_btf) { |
6352 | err = btf_relocate(btf, vmlinux_btf, &btf->base_id_map); |
6353 | if (err) |
6354 | goto errout; |
6355 | btf_free(base_btf); |
6356 | base_btf = vmlinux_btf; |
6357 | } |
6358 | |
6359 | btf_verifier_env_free(env); |
6360 | refcount_set(&btf->refcnt, 1); |
6361 | return btf; |
6362 | |
6363 | errout: |
6364 | btf_verifier_env_free(env); |
6365 | if (!IS_ERR(base_btf) && base_btf != vmlinux_btf) |
6366 | btf_free(base_btf); |
6367 | if (btf) { |
6368 | kvfree(btf->data); |
6369 | kvfree(btf->types); |
6370 | kfree(btf); |
6371 | } |
6372 | return ERR_PTR(err); |
6373 | } |
6374 | |
6375 | #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */ |
6376 | |
6377 | struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog) |
6378 | { |
6379 | struct bpf_prog *tgt_prog = prog->aux->dst_prog; |
6380 | |
6381 | if (tgt_prog) |
6382 | return tgt_prog->aux->btf; |
6383 | else |
6384 | return prog->aux->attach_btf; |
6385 | } |
6386 | |
6387 | static bool is_void_or_int_ptr(struct btf *btf, const struct btf_type *t) |
6388 | { |
6389 | /* skip modifiers */ |
	t = btf_type_skip_modifiers(btf, t->type, NULL);
6391 | return btf_type_is_void(t) || btf_type_is_int(t); |
6392 | } |
6393 | |
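/* Map a byte offset within the raw tracing context to an argument
 * index. Per the accounting below, a pointer argument occupies 8 bytes
 * and anything else roundup(size, 8). E.g. for foo(struct s val, int x)
 * with a 16-byte struct s: off 0..15 -> arg 0, off 16..23 -> arg 1, and
 * off 24..31 -> nr_args, i.e. the return value slot.
 */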
6394 | u32 btf_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto, |
6395 | int off) |
6396 | { |
6397 | const struct btf_param *args; |
6398 | const struct btf_type *t; |
6399 | u32 offset = 0, nr_args; |
6400 | int i; |
6401 | |
6402 | if (!func_proto) |
6403 | return off / 8; |
6404 | |
	nr_args = btf_type_vlen(func_proto);
6406 | args = (const struct btf_param *)(func_proto + 1); |
6407 | for (i = 0; i < nr_args; i++) { |
		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
6409 | offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8); |
6410 | if (off < offset) |
6411 | return i; |
6412 | } |
6413 | |
	t = btf_type_skip_modifiers(btf, func_proto->type, NULL);
6415 | offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8); |
6416 | if (off < offset) |
6417 | return nr_args; |
6418 | |
6419 | return nr_args + 1; |
6420 | } |
6421 | |
6422 | static bool prog_args_trusted(const struct bpf_prog *prog) |
6423 | { |
6424 | enum bpf_attach_type atype = prog->expected_attach_type; |
6425 | |
6426 | switch (prog->type) { |
6427 | case BPF_PROG_TYPE_TRACING: |
6428 | return atype == BPF_TRACE_RAW_TP || atype == BPF_TRACE_ITER; |
6429 | case BPF_PROG_TYPE_LSM: |
6430 | return bpf_lsm_is_trusted(prog); |
6431 | case BPF_PROG_TYPE_STRUCT_OPS: |
6432 | return true; |
6433 | default: |
6434 | return false; |
6435 | } |
6436 | } |
6437 | |
6438 | int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto, |
6439 | u32 arg_no) |
6440 | { |
6441 | const struct btf_param *args; |
6442 | const struct btf_type *t; |
6443 | int off = 0, i; |
6444 | u32 sz; |
6445 | |
	args = btf_params(func_proto);
6447 | for (i = 0; i < arg_no; i++) { |
6448 | t = btf_type_by_id(btf, args[i].type); |
		t = btf_resolve_size(btf, t, &sz);
		if (IS_ERR(t))
			return PTR_ERR(t);
6452 | off += roundup(sz, 8); |
6453 | } |
6454 | |
6455 | return off; |
6456 | } |
6457 | |
6458 | struct bpf_raw_tp_null_args { |
6459 | const char *func; |
6460 | u64 mask; |
6461 | }; |
6462 | |
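/* Tracepoints whose arguments may legitimately be NULL. Each argument
 * reserves 4 bits in mask, with the low bit of nibble n flagging
 * argument n as possibly NULL; e.g. 0x1 covers the first argument and
 * 0x10 the second. Flagged arguments are presented to programs as
 * PTR_MAYBE_NULL rather than trusted pointers.
 */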
6463 | static const struct bpf_raw_tp_null_args raw_tp_null_args[] = { |
6464 | /* sched */ |
6465 | { "sched_pi_setprio", 0x10 }, |
6466 | /* ... from sched_numa_pair_template event class */ |
6467 | { "sched_stick_numa", 0x100 }, |
6468 | { "sched_swap_numa", 0x100 }, |
6469 | /* afs */ |
6470 | { "afs_make_fs_call", 0x10 }, |
6471 | { "afs_make_fs_calli", 0x10 }, |
6472 | { "afs_make_fs_call1", 0x10 }, |
6473 | { "afs_make_fs_call2", 0x10 }, |
6474 | { "afs_protocol_error", 0x1 }, |
6475 | { "afs_flock_ev", 0x10 }, |
6476 | /* cachefiles */ |
6477 | { "cachefiles_lookup", 0x1 | 0x200 }, |
6478 | { "cachefiles_unlink", 0x1 }, |
6479 | { "cachefiles_rename", 0x1 }, |
6480 | { "cachefiles_prep_read", 0x1 }, |
6481 | { "cachefiles_mark_active", 0x1 }, |
6482 | { "cachefiles_mark_failed", 0x1 }, |
6483 | { "cachefiles_mark_inactive", 0x1 }, |
6484 | { "cachefiles_vfs_error", 0x1 }, |
6485 | { "cachefiles_io_error", 0x1 }, |
6486 | { "cachefiles_ondemand_open", 0x1 }, |
6487 | { "cachefiles_ondemand_copen", 0x1 }, |
6488 | { "cachefiles_ondemand_close", 0x1 }, |
6489 | { "cachefiles_ondemand_read", 0x1 }, |
6490 | { "cachefiles_ondemand_cread", 0x1 }, |
6491 | { "cachefiles_ondemand_fd_write", 0x1 }, |
6492 | { "cachefiles_ondemand_fd_release", 0x1 }, |
6493 | /* ext4, from ext4__mballoc event class */ |
6494 | { "ext4_mballoc_discard", 0x10 }, |
6495 | { "ext4_mballoc_free", 0x10 }, |
6496 | /* fib */ |
6497 | { "fib_table_lookup", 0x100 }, |
6498 | /* filelock */ |
6499 | /* ... from filelock_lock event class */ |
6500 | { "posix_lock_inode", 0x10 }, |
6501 | { "fcntl_setlk", 0x10 }, |
6502 | { "locks_remove_posix", 0x10 }, |
6503 | { "flock_lock_inode", 0x10 }, |
6504 | /* ... from filelock_lease event class */ |
6505 | { "break_lease_noblock", 0x10 }, |
6506 | { "break_lease_block", 0x10 }, |
6507 | { "break_lease_unblock", 0x10 }, |
6508 | { "generic_delete_lease", 0x10 }, |
6509 | { "time_out_leases", 0x10 }, |
6510 | /* host1x */ |
6511 | { "host1x_cdma_push_gather", 0x10000 }, |
6512 | /* huge_memory */ |
6513 | { "mm_khugepaged_scan_pmd", 0x10 }, |
6514 | { "mm_collapse_huge_page_isolate", 0x1 }, |
6515 | { "mm_khugepaged_scan_file", 0x10 }, |
6516 | { "mm_khugepaged_collapse_file", 0x10 }, |
6517 | /* kmem */ |
6518 | { "mm_page_alloc", 0x1 }, |
6519 | { "mm_page_pcpu_drain", 0x1 }, |
6520 | /* .. from mm_page event class */ |
6521 | { "mm_page_alloc_zone_locked", 0x1 }, |
6522 | /* netfs */ |
6523 | { "netfs_failure", 0x10 }, |
6524 | /* power */ |
6525 | { "device_pm_callback_start", 0x10 }, |
6526 | /* qdisc */ |
6527 | { "qdisc_dequeue", 0x1000 }, |
6528 | /* rxrpc */ |
6529 | { "rxrpc_recvdata", 0x1 }, |
6530 | { "rxrpc_resend", 0x10 }, |
6531 | { "rxrpc_tq", 0x10 }, |
6532 | { "rxrpc_client", 0x1 }, |
6533 | /* skb */ |
	{ "kfree_skb", 0x1000 },
6535 | /* sunrpc */ |
6536 | { "xs_stream_read_data", 0x1 }, |
6537 | /* ... from xprt_cong_event event class */ |
6538 | { "xprt_reserve_cong", 0x10 }, |
6539 | { "xprt_release_cong", 0x10 }, |
6540 | { "xprt_get_cong", 0x10 }, |
6541 | { "xprt_put_cong", 0x10 }, |
6542 | /* tcp */ |
6543 | { "tcp_send_reset", 0x11 }, |
6544 | { "tcp_sendmsg_locked", 0x100 }, |
6545 | /* tegra_apb_dma */ |
6546 | { "tegra_dma_tx_status", 0x100 }, |
6547 | /* timer_migration */ |
6548 | { "tmigr_update_events", 0x1 }, |
6549 | /* writeback, from writeback_folio_template event class */ |
6550 | { "writeback_dirty_folio", 0x10 }, |
6551 | { "folio_wait_writeback", 0x10 }, |
6552 | /* rdma */ |
6553 | { "mr_integ_alloc", 0x2000 }, |
6554 | /* bpf_testmod */ |
6555 | { "bpf_testmod_test_read", 0x0 }, |
6556 | /* amdgpu */ |
6557 | { "amdgpu_vm_bo_map", 0x1 }, |
6558 | { "amdgpu_vm_bo_unmap", 0x1 }, |
6559 | /* netfs */ |
6560 | { "netfs_folioq", 0x1 }, |
6561 | /* xfs from xfs_defer_pending_class */ |
6562 | { "xfs_defer_create_intent", 0x1 }, |
6563 | { "xfs_defer_cancel_list", 0x1 }, |
6564 | { "xfs_defer_pending_finish", 0x1 }, |
6565 | { "xfs_defer_pending_abort", 0x1 }, |
6566 | { "xfs_defer_relog_intent", 0x1 }, |
6567 | { "xfs_defer_isolate_paused", 0x1 }, |
6568 | { "xfs_defer_item_pause", 0x1 }, |
6569 | { "xfs_defer_item_unpause", 0x1 }, |
6570 | /* xfs from xfs_defer_pending_item_class */ |
6571 | { "xfs_defer_add_item", 0x1 }, |
6572 | { "xfs_defer_cancel_item", 0x1 }, |
6573 | { "xfs_defer_finish_item", 0x1 }, |
6574 | /* xfs from xfs_icwalk_class */ |
6575 | { "xfs_ioc_free_eofblocks", 0x10 }, |
6576 | { "xfs_blockgc_free_space", 0x10 }, |
6577 | /* xfs from xfs_btree_cur_class */ |
6578 | { "xfs_btree_updkeys", 0x100 }, |
6579 | { "xfs_btree_overlapped_query_range", 0x100 }, |
6580 | /* xfs from xfs_imap_class*/ |
6581 | { "xfs_map_blocks_found", 0x10000 }, |
6582 | { "xfs_map_blocks_alloc", 0x10000 }, |
6583 | { "xfs_iomap_alloc", 0x1000 }, |
6584 | { "xfs_iomap_found", 0x1000 }, |
6585 | /* xfs from xfs_fs_class */ |
6586 | { "xfs_inodegc_flush", 0x1 }, |
6587 | { "xfs_inodegc_push", 0x1 }, |
6588 | { "xfs_inodegc_start", 0x1 }, |
6589 | { "xfs_inodegc_stop", 0x1 }, |
6590 | { "xfs_inodegc_queue", 0x1 }, |
6591 | { "xfs_inodegc_throttle", 0x1 }, |
6592 | { "xfs_fs_sync_fs", 0x1 }, |
6593 | { "xfs_blockgc_start", 0x1 }, |
6594 | { "xfs_blockgc_stop", 0x1 }, |
6595 | { "xfs_blockgc_worker", 0x1 }, |
6596 | { "xfs_blockgc_flush_all", 0x1 }, |
6597 | /* xfs_scrub */ |
6598 | { "xchk_nlinks_live_update", 0x10 }, |
6599 | /* xfs_scrub from xchk_metapath_class */ |
6600 | { "xchk_metapath_lookup", 0x100 }, |
6601 | /* nfsd */ |
6602 | { "nfsd_dirent", 0x1 }, |
6603 | { "nfsd_file_acquire", 0x1001 }, |
6604 | { "nfsd_file_insert_err", 0x1 }, |
6605 | { "nfsd_file_cons_err", 0x1 }, |
6606 | /* nfs4 */ |
6607 | { "nfs4_setup_sequence", 0x1 }, |
6608 | { "pnfs_update_layout", 0x10000 }, |
6609 | { "nfs4_inode_callback_event", 0x200 }, |
6610 | { "nfs4_inode_stateid_callback_event", 0x200 }, |
6611 | /* nfs from pnfs_layout_event */ |
6612 | { "pnfs_mds_fallback_pg_init_read", 0x10000 }, |
6613 | { "pnfs_mds_fallback_pg_init_write", 0x10000 }, |
6614 | { "pnfs_mds_fallback_pg_get_mirror_count", 0x10000 }, |
6615 | { "pnfs_mds_fallback_read_done", 0x10000 }, |
6616 | { "pnfs_mds_fallback_write_done", 0x10000 }, |
6617 | { "pnfs_mds_fallback_read_pagelist", 0x10000 }, |
6618 | { "pnfs_mds_fallback_write_pagelist", 0x10000 }, |
6619 | /* coda */ |
6620 | { "coda_dec_pic_run", 0x10 }, |
6621 | { "coda_dec_pic_done", 0x10 }, |
6622 | /* cfg80211 */ |
6623 | { "cfg80211_scan_done", 0x11 }, |
6624 | { "rdev_set_coalesce", 0x10 }, |
6625 | { "cfg80211_report_wowlan_wakeup", 0x100 }, |
6626 | { "cfg80211_inform_bss_frame", 0x100 }, |
6627 | { "cfg80211_michael_mic_failure", 0x10000 }, |
6628 | /* cfg80211 from wiphy_work_event */ |
6629 | { "wiphy_work_queue", 0x10 }, |
6630 | { "wiphy_work_run", 0x10 }, |
6631 | { "wiphy_work_cancel", 0x10 }, |
6632 | { "wiphy_work_flush", 0x10 }, |
6633 | /* hugetlbfs */ |
6634 | { "hugetlbfs_alloc_inode", 0x10 }, |
6635 | /* spufs */ |
6636 | { "spufs_context", 0x10 }, |
6637 | /* kvm_hv */ |
6638 | { "kvm_page_fault_enter", 0x100 }, |
6639 | /* dpu */ |
6640 | { "dpu_crtc_setup_mixer", 0x100 }, |
6641 | /* binder */ |
6642 | { "binder_transaction", 0x100 }, |
6643 | /* bcachefs */ |
6644 | { "btree_path_free", 0x100 }, |
6645 | /* hfi1_tx */ |
6646 | { "hfi1_sdma_progress", 0x1000 }, |
6647 | /* iptfs */ |
6648 | { "iptfs_ingress_postq_event", 0x1000 }, |
6649 | /* neigh */ |
6650 | { "neigh_update", 0x10 }, |
6651 | /* snd_firewire_lib */ |
6652 | { "amdtp_packet", 0x100 }, |
6653 | }; |
6654 | |
6655 | bool btf_ctx_access(int off, int size, enum bpf_access_type type, |
6656 | const struct bpf_prog *prog, |
6657 | struct bpf_insn_access_aux *info) |
6658 | { |
6659 | const struct btf_type *t = prog->aux->attach_func_proto; |
6660 | struct bpf_prog *tgt_prog = prog->aux->dst_prog; |
6661 | struct btf *btf = bpf_prog_get_target_btf(prog); |
6662 | const char *tname = prog->aux->attach_func_name; |
6663 | struct bpf_verifier_log *log = info->log; |
6664 | const struct btf_param *args; |
6665 | bool ptr_err_raw_tp = false; |
6666 | const char *tag_value; |
6667 | u32 nr_args, arg; |
6668 | int i, ret; |
6669 | |
6670 | if (off % 8) { |
		bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
6672 | tname, off); |
6673 | return false; |
6674 | } |
	arg = btf_ctx_arg_idx(btf, t, off);
	args = (const struct btf_param *)(t + 1);
	/* If t is NULL, fall back to a default BPF prog with
	 * MAX_BPF_FUNC_REG_ARGS u64 arguments.
	 */
6680 | nr_args = t ? btf_type_vlen(t) : MAX_BPF_FUNC_REG_ARGS; |
6681 | if (prog->aux->attach_btf_trace) { |
6682 | /* skip first 'void *__data' argument in btf_trace_##name typedef */ |
6683 | args++; |
6684 | nr_args--; |
6685 | } |
6686 | |
6687 | if (arg > nr_args) { |
6688 | bpf_log(log, "func '%s' doesn't have %d-th argument\n", |
6689 | tname, arg + 1); |
6690 | return false; |
6691 | } |
6692 | |
6693 | if (arg == nr_args) { |
6694 | switch (prog->expected_attach_type) { |
6695 | case BPF_LSM_MAC: |
6696 | /* mark we are accessing the return value */ |
6697 | info->is_retval = true; |
6698 | fallthrough; |
6699 | case BPF_LSM_CGROUP: |
6700 | case BPF_TRACE_FEXIT: |
6701 | /* When LSM programs are attached to void LSM hooks |
6702 | * they use FEXIT trampolines and when attached to |
6703 | * int LSM hooks, they use MODIFY_RETURN trampolines. |
6704 | * |
6705 | * While the LSM programs are BPF_MODIFY_RETURN-like |
6706 | * the check: |
6707 | * |
6708 | * if (ret_type != 'int') |
6709 | * return -EINVAL; |
6710 | * |
6711 | * is _not_ done here. This is still safe as LSM hooks |
6712 | * have only void and int return types. |
6713 | */ |
6714 | if (!t) |
6715 | return true; |
6716 | t = btf_type_by_id(btf, t->type); |
6717 | break; |
6718 | case BPF_MODIFY_RETURN: |
6719 | /* For now the BPF_MODIFY_RETURN can only be attached to |
6720 | * functions that return an int. |
6721 | */ |
6722 | if (!t) |
6723 | return false; |
6724 | |
6725 | t = btf_type_skip_modifiers(btf, t->type, NULL); |
6726 | if (!btf_type_is_small_int(t)) { |
6727 | bpf_log(log, |
6728 | "ret type %s not allowed for fmod_ret\n", |
6729 | btf_type_str(t)); |
6730 | return false; |
6731 | } |
6732 | break; |
6733 | default: |
6734 | bpf_log(log, "func '%s' doesn't have %d-th argument\n", |
6735 | tname, arg + 1); |
6736 | return false; |
6737 | } |
6738 | } else { |
6739 | if (!t) |
6740 | /* Default prog with MAX_BPF_FUNC_REG_ARGS args */ |
6741 | return true; |
6742 | t = btf_type_by_id(btf, args[arg].type); |
6743 | } |
6744 | |
6745 | /* skip modifiers */ |
6746 | while (btf_type_is_modifier(t)) |
6747 | t = btf_type_by_id(btf, t->type); |
6748 | if (btf_type_is_small_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t)) |
6749 | /* accessing a scalar */ |
6750 | return true; |
6751 | if (!btf_type_is_ptr(t)) { |
6752 | bpf_log(log, |
6753 | fmt: "func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n", |
6754 | tname, arg, |
6755 | __btf_name_by_offset(btf, offset: t->name_off), |
6756 | btf_type_str(t)); |
6757 | return false; |
6758 | } |
6759 | |
6760 | if (size != sizeof(u64)) { |
6761 | bpf_log(log, fmt: "func '%s' size %d must be 8\n", |
6762 | tname, size); |
6763 | return false; |
6764 | } |
6765 | |
6766 | /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */ |
6767 | for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { |
6768 | const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; |
6769 | u32 type, flag; |
6770 | |
6771 | type = base_type(ctx_arg_info->reg_type); |
6772 | flag = type_flag(ctx_arg_info->reg_type); |
6773 | if (ctx_arg_info->offset == off && type == PTR_TO_BUF && |
6774 | (flag & PTR_MAYBE_NULL)) { |
6775 | info->reg_type = ctx_arg_info->reg_type; |
6776 | return true; |
6777 | } |
6778 | } |
6779 | |
6780 | /* |
6781 | * If it's a pointer to void or to an integer, it is equivalent to a scalar |
6782 | * from the verifier safety POV. Either way, no further pointer walking is allowed. |
6783 | */ |
6784 | if (is_void_or_int_ptr(btf, t)) |
6785 | return true; |
6786 | |
6787 | /* this is a pointer to another type */ |
6788 | for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { |
6789 | const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; |
6790 | |
6791 | if (ctx_arg_info->offset == off) { |
6792 | if (!ctx_arg_info->btf_id) { |
6793 | bpf_log(log, "invalid btf_id for context argument offset %u\n", off); |
6794 | return false; |
6795 | } |
6796 | |
6797 | info->reg_type = ctx_arg_info->reg_type; |
6798 | info->btf = ctx_arg_info->btf ? : btf_vmlinux; |
6799 | info->btf_id = ctx_arg_info->btf_id; |
6800 | info->ref_obj_id = ctx_arg_info->ref_obj_id; |
6801 | return true; |
6802 | } |
6803 | } |
6804 | |
6805 | info->reg_type = PTR_TO_BTF_ID; |
6806 | if (prog_args_trusted(prog)) |
6807 | info->reg_type |= PTR_TRUSTED; |
6808 | |
6809 | if (btf_param_match_suffix(btf, &args[arg], "__nullable")) |
6810 | info->reg_type |= PTR_MAYBE_NULL; |
6811 | |
6812 | if (prog->expected_attach_type == BPF_TRACE_RAW_TP) { |
6813 | struct btf *btf = prog->aux->attach_btf; |
6814 | const struct btf_type *t; |
6815 | const char *tname; |
6816 | |
6817 | /* BTF lookups should not fail; if one does, bail out with false */ |
6818 | t = btf_type_by_id(btf, prog->aux->attach_btf_id); |
6819 | if (!t) |
6820 | return false; |
6821 | tname = btf_name_by_offset(btf, t->name_off); |
6822 | if (!tname) |
6823 | return false; |
6824 | /* Checked by bpf_check_attach_target */ |
6825 | tname += sizeof("btf_trace_") - 1; |
6826 | for (i = 0; i < ARRAY_SIZE(raw_tp_null_args); i++) { |
6827 | /* Is this a func with potential NULL args? */ |
6828 | if (strcmp(tname, raw_tp_null_args[i].func)) |
6829 | continue; |
6830 | if (raw_tp_null_args[i].mask & (0x1ULL << (arg * 4))) |
6831 | info->reg_type |= PTR_MAYBE_NULL; |
6832 | /* Is the current arg IS_ERR? */ |
6833 | if (raw_tp_null_args[i].mask & (0x2ULL << (arg * 4))) |
6834 | ptr_err_raw_tp = true; |
6835 | break; |
6836 | } |
6837 | /* If we don't know NULL-ness specification and the tracepoint |
6838 | * is coming from a loadable module, be conservative and mark |
6839 | * argument as PTR_MAYBE_NULL. |
6840 | */ |
6841 | if (i == ARRAY_SIZE(raw_tp_null_args) && btf_is_module(btf)) |
6842 | info->reg_type |= PTR_MAYBE_NULL; |
6843 | } |
6844 | |
6845 | if (tgt_prog) { |
6846 | enum bpf_prog_type tgt_type; |
6847 | |
6848 | if (tgt_prog->type == BPF_PROG_TYPE_EXT) |
6849 | tgt_type = tgt_prog->aux->saved_dst_prog_type; |
6850 | else |
6851 | tgt_type = tgt_prog->type; |
6852 | |
6853 | ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg); |
6854 | if (ret > 0) { |
6855 | info->btf = btf_vmlinux; |
6856 | info->btf_id = ret; |
6857 | return true; |
6858 | } else { |
6859 | return false; |
6860 | } |
6861 | } |
6862 | |
6863 | info->btf = btf; |
6864 | info->btf_id = t->type; |
6865 | t = btf_type_by_id(btf, t->type); |
6866 | |
6867 | if (btf_type_is_type_tag(t) && !btf_type_kflag(t)) { |
6868 | tag_value = __btf_name_by_offset(btf, t->name_off); |
6869 | if (strcmp(tag_value, "user") == 0) |
6870 | info->reg_type |= MEM_USER; |
6871 | if (strcmp(tag_value, "percpu") == 0) |
6872 | info->reg_type |= MEM_PERCPU; |
6873 | } |
6874 | |
6875 | /* skip modifiers */ |
6876 | while (btf_type_is_modifier(t)) { |
6877 | info->btf_id = t->type; |
6878 | t = btf_type_by_id(btf, t->type); |
6879 | } |
6880 | if (!btf_type_is_struct(t)) { |
6881 | bpf_log(log, |
6882 | fmt: "func '%s' arg%d type %s is not a struct\n", |
6883 | tname, arg, btf_type_str(t)); |
6884 | return false; |
6885 | } |
6886 | bpf_log(log, fmt: "func '%s' arg%d has btf_id %d type %s '%s'\n", |
6887 | tname, arg, info->btf_id, btf_type_str(t), |
6888 | __btf_name_by_offset(btf, offset: t->name_off)); |
6889 | |
6890 | /* Perform all checks on the validity of type for this argument, but if |
6891 | * we know it can be IS_ERR at runtime, scrub pointer type and mark as |
6892 | * scalar. |
6893 | */ |
6894 | if (ptr_err_raw_tp) { |
6895 | bpf_log(log, "marking pointer arg%d as scalar as it may encode error", arg); |
6896 | info->reg_type = SCALAR_VALUE; |
6897 | } |
6898 | return true; |
6899 | } |
6900 | EXPORT_SYMBOL_GPL(btf_ctx_access); |
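/* Illustrative sketch (BPF program side, not part of this file): for a
 * tracing program the context validated above is an array of up to
 * MAX_BPF_FUNC_REG_ARGS u64 slots, one per attach-function argument, so
 * an fentry program might look like:
 *
 *	SEC("fentry/kfree_skb")
 *	int BPF_PROG(on_kfree, struct sk_buff *skb)
 *	{
 *		// libbpf's BPF_PROG() expands into 8-byte ctx[] loads,
 *		// which is what the off % 8 and size == 8 checks enforce.
 *		return 0;
 *	}
 *
 * "kfree_skb" is only an example attach target here.
 */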
6901 | |
6902 | enum bpf_struct_walk_result { |
6903 | /* < 0 error */ |
6904 | WALK_SCALAR = 0, |
6905 | WALK_PTR, |
6906 | WALK_STRUCT, |
6907 | }; |
6908 | |
6909 | static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf, |
6910 | const struct btf_type *t, int off, int size, |
6911 | u32 *next_btf_id, enum bpf_type_flag *flag, |
6912 | const char **field_name) |
6913 | { |
6914 | u32 i, moff, mtrue_end, msize = 0, total_nelems = 0; |
6915 | const struct btf_type *mtype, *elem_type = NULL; |
6916 | const struct btf_member *member; |
6917 | const char *tname, *mname, *tag_value; |
6918 | u32 vlen, elem_id, mid; |
6919 | |
6920 | again: |
6921 | if (btf_type_is_modifier(t)) |
6922 | t = btf_type_skip_modifiers(btf, t->type, NULL); |
6923 | tname = __btf_name_by_offset(btf, t->name_off); |
6924 | if (!btf_type_is_struct(t)) { |
6925 | bpf_log(log, "Type '%s' is not a struct\n", tname); |
6926 | return -EINVAL; |
6927 | } |
6928 | |
6929 | vlen = btf_type_vlen(t); |
6930 | if (BTF_INFO_KIND(t->info) == BTF_KIND_UNION && vlen != 1 && !(*flag & PTR_UNTRUSTED)) |
6931 | /* |
6932 | * walking unions yields untrusted pointers |
6933 | * with exception of __bpf_md_ptr and other |
6934 | * unions with a single member |
6935 | */ |
6936 | *flag |= PTR_UNTRUSTED; |
6937 | |
6938 | if (off + size > t->size) { |
6939 | /* If the last element is a variable size array, we may |
6940 | * need to relax the rule. |
6941 | */ |
6942 | struct btf_array *array_elem; |
6943 | |
6944 | if (vlen == 0) |
6945 | goto error; |
6946 | |
6947 | member = btf_type_member(t) + vlen - 1; |
6948 | mtype = btf_type_skip_modifiers(btf, member->type, |
6949 | NULL); |
6950 | if (!btf_type_is_array(mtype)) |
6951 | goto error; |
6952 | |
6953 | array_elem = (struct btf_array *)(mtype + 1); |
6954 | if (array_elem->nelems != 0) |
6955 | goto error; |
6956 | |
6957 | moff = __btf_member_bit_offset(t, member) / 8; |
6958 | if (off < moff) |
6959 | goto error; |
6960 | |
6961 | /* allow structure and integer */ |
6962 | t = btf_type_skip_modifiers(btf, array_elem->type, |
6963 | NULL); |
6964 | |
6965 | if (btf_type_is_int(t)) |
6966 | return WALK_SCALAR; |
6967 | |
6968 | if (!btf_type_is_struct(t)) |
6969 | goto error; |
6970 | |
6971 | off = (off - moff) % t->size; |
6972 | goto again; |
6973 | |
6974 | error: |
6975 | bpf_log(log, "access beyond struct %s at off %u size %u\n", |
6976 | tname, off, size); |
6977 | return -EACCES; |
6978 | } |
6979 | |
6980 | for_each_member(i, t, member) { |
6981 | /* offset of the field in bytes */ |
6982 | moff = __btf_member_bit_offset(t, member) / 8; |
6983 | if (off + size <= moff) |
6984 | /* won't find anything, field is already too far */ |
6985 | break; |
6986 | |
6987 | if (__btf_member_bitfield_size(t, member)) { |
6988 | u32 end_bit = __btf_member_bit_offset(t, member) + |
6989 | __btf_member_bitfield_size(t, member); |
6990 | |
6991 | /* off <= moff instead of off == moff because clang |
6992 | * does not generate a BTF member for anonymous |
6993 | * bitfield like the ":16" here: |
6994 | * struct { |
6995 | * int :16; |
6996 | * int x:8; |
6997 | * }; |
6998 | */ |
6999 | if (off <= moff && |
7000 | BITS_ROUNDUP_BYTES(end_bit) <= off + size) |
7001 | return WALK_SCALAR; |
7002 | |
7003 | /* Either "off" is accessing a following member, |
7004 | * |
7005 | * or |
7006 | * |
7007 | * this is a partial access at either end of the |
7008 | * bitfield. Continue in that case as well, i.e. |
7009 | * treat it as not accessing this bitfield, and |
7010 | * eventually error out as "field not found" to |
7011 | * keep it simple. |
7012 | * It could be relaxed if a legitimate partial |
7013 | * access case shows up later. |
7014 | */ |
7015 | continue; |
7016 | } |
7017 | |
7018 | /* "off" may be pointing into a hole in the struct */ |
7019 | if (off < moff) |
7020 | break; |
7021 | |
7022 | /* type of the field */ |
7023 | mid = member->type; |
7024 | mtype = btf_type_by_id(btf, member->type); |
7025 | mname = __btf_name_by_offset(btf, member->name_off); |
7026 | |
7027 | mtype = __btf_resolve_size(btf, mtype, &msize, |
7028 | &elem_type, &elem_id, &total_nelems, |
7029 | &mid); |
7030 | if (IS_ERR(mtype)) { |
7031 | bpf_log(log, "field %s doesn't have size\n", mname); |
7032 | return -EFAULT; |
7033 | } |
7034 | |
7035 | mtrue_end = moff + msize; |
7036 | if (off >= mtrue_end) |
7037 | /* no overlap with member, keep iterating */ |
7038 | continue; |
7039 | |
7040 | if (btf_type_is_array(t: mtype)) { |
7041 | u32 elem_idx; |
7042 | |
7043 | /* __btf_resolve_size() above helps to |
7044 | * linearize a multi-dimensional array. |
7045 | * |
7046 | * The logic here is treating an array |
7047 | * in a struct as the following way: |
7048 | * |
7049 | * struct outer { |
7050 | * struct inner array[2][2]; |
7051 | * }; |
7052 | * |
7053 | * looks like: |
7054 | * |
7055 | * struct outer { |
7056 | * struct inner array_elem0; |
7057 | * struct inner array_elem1; |
7058 | * struct inner array_elem2; |
7059 | * struct inner array_elem3; |
7060 | * }; |
7061 | * |
7062 | * When accessing outer->array[1][0], it moves |
7063 | * moff to "array_elem2", set mtype to |
7064 | * "struct inner", and msize also becomes |
7065 | * sizeof(struct inner). Most of the remaining |
7066 | * logic then falls through without caring |
7067 | * whether the current member is an array or |
7068 | * not. |
7069 | * |
7070 | * Unlike mtype/msize/moff, mtrue_end does not |
7071 | * change. The "_true" in its name signals that |
7072 | * it does not always correspond to the current |
7073 | * mtype/msize/moff. |
7074 | * It is the true end of the current |
7075 | * member (i.e. array in this case). That |
7076 | * will allow an int array to be accessed like |
7077 | * a scratch space, |
7078 | * i.e. allow access beyond the size of |
7079 | * the array's element as long as it is |
7080 | * within the mtrue_end boundary. |
7081 | */ |
7082 | |
7083 | /* skip empty array */ |
7084 | if (moff == mtrue_end) |
7085 | continue; |
7086 | |
7087 | msize /= total_nelems; |
7088 | elem_idx = (off - moff) / msize; |
7089 | moff += elem_idx * msize; |
7090 | mtype = elem_type; |
7091 | mid = elem_id; |
7092 | } |
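/* Worked example of the arithmetic above (illustrative numbers):
 * for "struct inner array[2][2]" with sizeof(struct inner) == 8,
 * __btf_resolve_size() reported msize == 32 and total_nelems == 4.
 * An access at off == moff + 16 then gives msize = 32 / 4 = 8,
 * elem_idx = 16 / 8 = 2 (i.e. array[1][0]), and moff advances by
 * elem_idx * msize = 16, so the walk continues inside that element.
 */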
7093 | |
7094 | /* the 'off' we're looking for is either equal to start |
7095 | * of this field or inside of this struct |
7096 | */ |
7097 | if (btf_type_is_struct(mtype)) { |
7098 | /* our field must be inside that union or struct */ |
7099 | t = mtype; |
7100 | |
7101 | /* return if the offset matches the member offset */ |
7102 | if (off == moff) { |
7103 | *next_btf_id = mid; |
7104 | return WALK_STRUCT; |
7105 | } |
7106 | |
7107 | /* adjust offset we're looking for */ |
7108 | off -= moff; |
7109 | goto again; |
7110 | } |
7111 | |
7112 | if (btf_type_is_ptr(mtype)) { |
7113 | const struct btf_type *stype, *t; |
7114 | enum bpf_type_flag tmp_flag = 0; |
7115 | u32 id; |
7116 | |
7117 | if (msize != size || off != moff) { |
7118 | bpf_log(log, |
7119 | fmt: "cannot access ptr member %s with moff %u in struct %s with off %u size %u\n", |
7120 | mname, moff, tname, off, size); |
7121 | return -EACCES; |
7122 | } |
7123 | |
7124 | /* check type tag */ |
7125 | t = btf_type_by_id(btf, mtype->type); |
7126 | if (btf_type_is_type_tag(t) && !btf_type_kflag(t)) { |
7127 | tag_value = __btf_name_by_offset(btf, t->name_off); |
7128 | /* check __user tag */ |
7129 | if (strcmp(tag_value, "user") == 0) |
7130 | tmp_flag = MEM_USER; |
7131 | /* check __percpu tag */ |
7132 | if (strcmp(tag_value, "percpu") == 0) |
7133 | tmp_flag = MEM_PERCPU; |
7134 | /* check __rcu tag */ |
7135 | if (strcmp(tag_value, "rcu") == 0) |
7136 | tmp_flag = MEM_RCU; |
7137 | } |
7138 | |
7139 | stype = btf_type_skip_modifiers(btf, mtype->type, &id); |
7140 | if (btf_type_is_struct(stype)) { |
7141 | *next_btf_id = id; |
7142 | *flag |= tmp_flag; |
7143 | if (field_name) |
7144 | *field_name = mname; |
7145 | return WALK_PTR; |
7146 | } |
7147 | } |
7148 | |
7149 | /* Allow more flexible access within an int as long as |
7150 | * it is within mtrue_end. |
7151 | * Since mtrue_end could be the end of an array, |
7152 | * that also allows using an array of int as a scratch |
7153 | * space. e.g. skb->cb[]. |
7154 | */ |
7155 | if (off + size > mtrue_end && !(*flag & PTR_UNTRUSTED)) { |
7156 | bpf_log(log, |
7157 | fmt: "access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n", |
7158 | mname, mtrue_end, tname, off, size); |
7159 | return -EACCES; |
7160 | } |
7161 | |
7162 | return WALK_SCALAR; |
7163 | } |
7164 | bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off); |
7165 | return -EINVAL; |
7166 | } |
7167 | |
7168 | int btf_struct_access(struct bpf_verifier_log *log, |
7169 | const struct bpf_reg_state *reg, |
7170 | int off, int size, enum bpf_access_type atype __maybe_unused, |
7171 | u32 *next_btf_id, enum bpf_type_flag *flag, |
7172 | const char **field_name) |
7173 | { |
7174 | const struct btf *btf = reg->btf; |
7175 | enum bpf_type_flag tmp_flag = 0; |
7176 | const struct btf_type *t; |
7177 | u32 id = reg->btf_id; |
7178 | int err; |
7179 | |
7180 | while (type_is_alloc(reg->type)) { |
7181 | struct btf_struct_meta *meta; |
7182 | struct btf_record *rec; |
7183 | int i; |
7184 | |
7185 | meta = btf_find_struct_meta(btf, id); |
7186 | if (!meta) |
7187 | break; |
7188 | rec = meta->record; |
7189 | for (i = 0; i < rec->cnt; i++) { |
7190 | struct btf_field *field = &rec->fields[i]; |
7191 | u32 offset = field->offset; |
7192 | if (off < offset + field->size && offset < off + size) { |
7193 | bpf_log(log, |
7194 | fmt: "direct access to %s is disallowed\n", |
7195 | btf_field_type_name(type: field->type)); |
7196 | return -EACCES; |
7197 | } |
7198 | } |
7199 | break; |
7200 | } |
7201 | |
7202 | t = btf_type_by_id(btf, id); |
7203 | do { |
7204 | err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag, field_name); |
7205 | |
7206 | switch (err) { |
7207 | case WALK_PTR: |
7208 | /* For local types, the destination register cannot |
7209 | * become a pointer again. |
7210 | */ |
7211 | if (type_is_alloc(reg->type)) |
7212 | return SCALAR_VALUE; |
7213 | /* If we found the pointer or scalar on t+off, |
7214 | * we're done. |
7215 | */ |
7216 | *next_btf_id = id; |
7217 | *flag = tmp_flag; |
7218 | return PTR_TO_BTF_ID; |
7219 | case WALK_SCALAR: |
7220 | return SCALAR_VALUE; |
7221 | case WALK_STRUCT: |
7222 | /* We found nested struct, so continue the search |
7223 | * by diving in it. At this point the offset is |
7224 | * aligned with the new type, so set it to 0. |
7225 | */ |
7226 | t = btf_type_by_id(btf, id); |
7227 | off = 0; |
7228 | break; |
7229 | default: |
7230 | /* It's either an error or an unknown return value; |
7231 | * scream and leave. |
7232 | */ |
7233 | if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value")) |
7234 | return -EINVAL; |
7235 | return err; |
7236 | } |
7237 | } while (t); |
7238 | |
7239 | return -EINVAL; |
7240 | } |
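/* Illustrative sketch of how the return values are consumed: for a
 * tracing program dereferencing a chain such as
 *
 *	struct mm_struct *mm = task->mm;	// first load
 *	unsigned long stack = mm->start_stack;	// second load
 *
 * the first load returns PTR_TO_BTF_ID with *next_btf_id set to the id
 * of struct mm_struct (WALK_PTR inside the walk), while the second
 * returns SCALAR_VALUE (WALK_SCALAR), ending the pointer walk.
 */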
7241 | |
7242 | /* Check that two BTF types, each specified as a BTF object + id, are exactly |
7243 | * the same. Trivial ID check is not enough due to module BTFs, because we can |
7244 | * end up with two different module BTFs, but IDs point to the common type in |
7245 | * vmlinux BTF. |
7246 | */ |
7247 | bool btf_types_are_same(const struct btf *btf1, u32 id1, |
7248 | const struct btf *btf2, u32 id2) |
7249 | { |
7250 | if (id1 != id2) |
7251 | return false; |
7252 | if (btf1 == btf2) |
7253 | return true; |
7254 | return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2); |
7255 | } |
7256 | |
7257 | bool btf_struct_ids_match(struct bpf_verifier_log *log, |
7258 | const struct btf *btf, u32 id, int off, |
7259 | const struct btf *need_btf, u32 need_type_id, |
7260 | bool strict) |
7261 | { |
7262 | const struct btf_type *type; |
7263 | enum bpf_type_flag flag = 0; |
7264 | int err; |
7265 | |
7266 | /* Are we already done? */ |
7267 | if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id)) |
7268 | return true; |
7269 | /* In case of strict type match, we do not walk struct, the top level |
7270 | * type match must succeed. When strict is true, off should have already |
7271 | * been 0. |
7272 | */ |
7273 | if (strict) |
7274 | return false; |
7275 | again: |
7276 | type = btf_type_by_id(btf, id); |
7277 | if (!type) |
7278 | return false; |
7279 | err = btf_struct_walk(log, btf, type, off, 1, &id, &flag, NULL); |
7280 | if (err != WALK_STRUCT) |
7281 | return false; |
7282 | |
7283 | /* We found nested struct object. If it matches |
7284 | * the requested ID, we're done. Otherwise let's |
7285 | * continue the search with offset 0 in the new |
7286 | * type. |
7287 | */ |
7288 | if (!btf_types_are_same(btf, id, need_btf, need_type_id)) { |
7289 | off = 0; |
7290 | goto again; |
7291 | } |
7292 | |
7293 | return true; |
7294 | } |
7295 | |
7296 | static int __get_type_size(struct btf *btf, u32 btf_id, |
7297 | const struct btf_type **ret_type) |
7298 | { |
7299 | const struct btf_type *t; |
7300 | |
7301 | *ret_type = btf_type_by_id(btf, 0); |
7302 | if (!btf_id) |
7303 | /* void */ |
7304 | return 0; |
7305 | t = btf_type_by_id(btf, btf_id); |
7306 | while (t && btf_type_is_modifier(t)) |
7307 | t = btf_type_by_id(btf, t->type); |
7308 | if (!t) |
7309 | return -EINVAL; |
7310 | *ret_type = t; |
7311 | if (btf_type_is_ptr(t)) |
7312 | /* kernel size of pointer. Not BPF's size of pointer. */ |
7313 | return sizeof(void *); |
7314 | if (btf_type_is_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t)) |
7315 | return t->size; |
7316 | return -EINVAL; |
7317 | } |
7318 | |
7319 | static u8 __get_type_fmodel_flags(const struct btf_type *t) |
7320 | { |
7321 | u8 flags = 0; |
7322 | |
7323 | if (__btf_type_is_struct(t)) |
7324 | flags |= BTF_FMODEL_STRUCT_ARG; |
7325 | if (btf_type_is_signed_int(t)) |
7326 | flags |= BTF_FMODEL_SIGNED_ARG; |
7327 | |
7328 | return flags; |
7329 | } |
7330 | |
7331 | int btf_distill_func_proto(struct bpf_verifier_log *log, |
7332 | struct btf *btf, |
7333 | const struct btf_type *func, |
7334 | const char *tname, |
7335 | struct btf_func_model *m) |
7336 | { |
7337 | const struct btf_param *args; |
7338 | const struct btf_type *t; |
7339 | u32 i, nargs; |
7340 | int ret; |
7341 | |
7342 | if (!func) { |
7343 | /* BTF function prototype doesn't match the verifier types. |
7344 | * Fall back to MAX_BPF_FUNC_REG_ARGS u64 args. |
7345 | */ |
7346 | for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) { |
7347 | m->arg_size[i] = 8; |
7348 | m->arg_flags[i] = 0; |
7349 | } |
7350 | m->ret_size = 8; |
7351 | m->ret_flags = 0; |
7352 | m->nr_args = MAX_BPF_FUNC_REG_ARGS; |
7353 | return 0; |
7354 | } |
7355 | args = (const struct btf_param *)(func + 1); |
7356 | nargs = btf_type_vlen(func); |
7357 | if (nargs > MAX_BPF_FUNC_ARGS) { |
7358 | bpf_log(log, |
7359 | "The function %s has %d arguments. Too many.\n", |
7360 | tname, nargs); |
7361 | return -EINVAL; |
7362 | } |
7363 | ret = __get_type_size(btf, func->type, &t); |
7364 | if (ret < 0 || __btf_type_is_struct(t)) { |
7365 | bpf_log(log, |
7366 | fmt: "The function %s return type %s is unsupported.\n", |
7367 | tname, btf_type_str(t)); |
7368 | return -EINVAL; |
7369 | } |
7370 | m->ret_size = ret; |
7371 | m->ret_flags = __get_type_fmodel_flags(t); |
7372 | |
7373 | for (i = 0; i < nargs; i++) { |
7374 | if (i == nargs - 1 && args[i].type == 0) { |
7375 | bpf_log(log, |
7376 | fmt: "The function %s with variable args is unsupported.\n", |
7377 | tname); |
7378 | return -EINVAL; |
7379 | } |
7380 | ret = __get_type_size(btf, args[i].type, &t); |
7381 | |
7382 | /* No support of struct argument size greater than 16 bytes */ |
7383 | if (ret < 0 || ret > 16) { |
7384 | bpf_log(log, |
7385 | fmt: "The function %s arg%d type %s is unsupported.\n", |
7386 | tname, i, btf_type_str(t)); |
7387 | return -EINVAL; |
7388 | } |
7389 | if (ret == 0) { |
7390 | bpf_log(log, |
7391 | fmt: "The function %s has malformed void argument.\n", |
7392 | tname); |
7393 | return -EINVAL; |
7394 | } |
7395 | m->arg_size[i] = ret; |
7396 | m->arg_flags[i] = __get_type_fmodel_flags(t); |
7397 | } |
7398 | m->nr_args = nargs; |
7399 | return 0; |
7400 | } |
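/* Illustrative sketch: for a target function such as
 *
 *	int kern_func(struct sk_buff *skb, u32 len);
 *
 * the distilled model is m->nr_args = 2, m->arg_size = { 8, 4 },
 * m->ret_size = 4, with BTF_FMODEL_SIGNED_ARG set in m->ret_flags for
 * the signed int return; arch trampoline code consumes exactly this
 * model to emit its register moves.
 */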
7401 | |
7402 | /* Compare BTFs of two functions assuming only scalars and pointers to context. |
7403 | * t1 points to BTF_KIND_FUNC in btf1 |
7404 | * t2 points to BTF_KIND_FUNC in btf2 |
7405 | * Returns: |
7406 | * EINVAL - function prototype mismatch |
7407 | * EFAULT - verifier bug |
7408 | * 0 - 99% match. The last 1% is validated by the verifier. |
7409 | */ |
7410 | static int btf_check_func_type_match(struct bpf_verifier_log *log, |
7411 | struct btf *btf1, const struct btf_type *t1, |
7412 | struct btf *btf2, const struct btf_type *t2) |
7413 | { |
7414 | const struct btf_param *args1, *args2; |
7415 | const char *fn1, *fn2, *s1, *s2; |
7416 | u32 nargs1, nargs2, i; |
7417 | |
7418 | fn1 = btf_name_by_offset(btf1, t1->name_off); |
7419 | fn2 = btf_name_by_offset(btf2, t2->name_off); |
7420 | |
7421 | if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) { |
7422 | bpf_log(log, "%s() is not a global function\n", fn1); |
7423 | return -EINVAL; |
7424 | } |
7425 | if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) { |
7426 | bpf_log(log, "%s() is not a global function\n", fn2); |
7427 | return -EINVAL; |
7428 | } |
7429 | |
7430 | t1 = btf_type_by_id(btf1, t1->type); |
7431 | if (!t1 || !btf_type_is_func_proto(t1)) |
7432 | return -EFAULT; |
7433 | t2 = btf_type_by_id(btf2, t2->type); |
7434 | if (!t2 || !btf_type_is_func_proto(t2)) |
7435 | return -EFAULT; |
7436 | |
7437 | args1 = (const struct btf_param *)(t1 + 1); |
7438 | nargs1 = btf_type_vlen(t1); |
7439 | args2 = (const struct btf_param *)(t2 + 1); |
7440 | nargs2 = btf_type_vlen(t2); |
7441 | |
7442 | if (nargs1 != nargs2) { |
7443 | bpf_log(log, "%s() has %d args while %s() has %d args\n", |
7444 | fn1, nargs1, fn2, nargs2); |
7445 | return -EINVAL; |
7446 | } |
7447 | |
7448 | t1 = btf_type_skip_modifiers(btf1, t1->type, NULL); |
7449 | t2 = btf_type_skip_modifiers(btf2, t2->type, NULL); |
7450 | if (t1->info != t2->info) { |
7451 | bpf_log(log, |
7452 | "Return type %s of %s() doesn't match type %s of %s()\n", |
7453 | btf_type_str(t1), fn1, |
7454 | btf_type_str(t2), fn2); |
7455 | return -EINVAL; |
7456 | } |
7457 | |
7458 | for (i = 0; i < nargs1; i++) { |
7459 | t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL); |
7460 | t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL); |
7461 | |
7462 | if (t1->info != t2->info) { |
7463 | bpf_log(log, "arg%d in %s() is %s while %s() has %s\n", |
7464 | i, fn1, btf_type_str(t1), |
7465 | fn2, btf_type_str(t2)); |
7466 | return -EINVAL; |
7467 | } |
7468 | if (btf_type_has_size(t1) && t1->size != t2->size) { |
7469 | bpf_log(log, |
7470 | "arg%d in %s() has size %d while %s() has %d\n", |
7471 | i, fn1, t1->size, |
7472 | fn2, t2->size); |
7473 | return -EINVAL; |
7474 | } |
7475 | |
7476 | /* global functions are validated with scalars and pointers |
7477 | * to context only. And only global functions can be replaced. |
7478 | * Hence type check only those types. |
7479 | */ |
7480 | if (btf_type_is_int(t1) || btf_is_any_enum(t1)) |
7481 | continue; |
7482 | if (!btf_type_is_ptr(t1)) { |
7483 | bpf_log(log, |
7484 | "arg%d in %s() has unrecognized type\n", |
7485 | i, fn1); |
7486 | return -EINVAL; |
7487 | } |
7488 | t1 = btf_type_skip_modifiers(btf1, t1->type, NULL); |
7489 | t2 = btf_type_skip_modifiers(btf2, t2->type, NULL); |
7490 | if (!btf_type_is_struct(t1)) { |
7491 | bpf_log(log, |
7492 | "arg%d in %s() is not a pointer to context\n", |
7493 | i, fn1); |
7494 | return -EINVAL; |
7495 | } |
7496 | if (!btf_type_is_struct(t2)) { |
7497 | bpf_log(log, |
7498 | "arg%d in %s() is not a pointer to context\n", |
7499 | i, fn2); |
7500 | return -EINVAL; |
7501 | } |
7502 | /* This is an optional check to make program writing easier. |
7503 | * Compare names of structs and report an error to the user. |
7504 | * btf_prepare_func_args() already checked that t2 struct |
7505 | * is a context type. btf_prepare_func_args() will check |
7506 | * later that t1 struct is a context type as well. |
7507 | */ |
7508 | s1 = btf_name_by_offset(btf1, t1->name_off); |
7509 | s2 = btf_name_by_offset(btf2, t2->name_off); |
7510 | if (strcmp(s1, s2)) { |
7511 | bpf_log(log, |
7512 | "arg%d %s(struct %s *) doesn't match %s(struct %s *)\n", |
7513 | i, fn1, s1, fn2, s2); |
7514 | return -EINVAL; |
7515 | } |
7516 | } |
7517 | return 0; |
7518 | } |
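/* Illustrative sketch (hypothetical function names): the checks above
 * would accept replacing
 *
 *	__noinline int orig(struct xdp_md *ctx, int val);
 *
 * with an freplace program whose global function has the same
 * prototype, since the argument counts, the scalar width of "val" and
 * the pointed-to context struct name all match; declaring "long val"
 * on one side would be rejected by the size comparison.
 */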
7519 | |
7520 | /* Compare BTFs of given program with BTF of target program */ |
7521 | int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, |
7522 | struct btf *btf2, const struct btf_type *t2) |
7523 | { |
7524 | struct btf *btf1 = prog->aux->btf; |
7525 | const struct btf_type *t1; |
7526 | u32 btf_id = 0; |
7527 | |
7528 | if (!prog->aux->func_info) { |
7529 | bpf_log(log, "Program extension requires BTF\n"); |
7530 | return -EINVAL; |
7531 | } |
7532 | |
7533 | btf_id = prog->aux->func_info[0].type_id; |
7534 | if (!btf_id) |
7535 | return -EFAULT; |
7536 | |
7537 | t1 = btf_type_by_id(btf1, btf_id); |
7538 | if (!t1 || !btf_type_is_func(t1)) |
7539 | return -EFAULT; |
7540 | |
7541 | return btf_check_func_type_match(log, btf1, t1, btf2, t2); |
7542 | } |
7543 | |
7544 | static bool btf_is_dynptr_ptr(const struct btf *btf, const struct btf_type *t) |
7545 | { |
7546 | const char *name; |
7547 | |
7548 | t = btf_type_by_id(btf, t->type); /* skip PTR */ |
7549 | |
7550 | while (btf_type_is_modifier(t)) |
7551 | t = btf_type_by_id(btf, t->type); |
7552 | |
7553 | /* allow either struct or struct forward declaration */ |
7554 | if (btf_type_is_struct(t) || |
7555 | (btf_type_is_fwd(t) && btf_type_kflag(t) == 0)) { |
7556 | name = btf_str_by_offset(btf, t->name_off); |
7557 | return name && strcmp(name, "bpf_dynptr") == 0; |
7558 | } |
7559 | |
7560 | return false; |
7561 | } |
7562 | |
7563 | struct bpf_cand_cache { |
7564 | const char *name; |
7565 | u32 name_len; |
7566 | u16 kind; |
7567 | u16 cnt; |
7568 | struct { |
7569 | const struct btf *btf; |
7570 | u32 id; |
7571 | } cands[]; |
7572 | }; |
7573 | |
7574 | static DEFINE_MUTEX(cand_cache_mutex); |
7575 | |
7576 | static struct bpf_cand_cache * |
7577 | bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id); |
7578 | |
7579 | static int btf_get_ptr_to_btf_id(struct bpf_verifier_log *log, int arg_idx, |
7580 | const struct btf *btf, const struct btf_type *t) |
7581 | { |
7582 | struct bpf_cand_cache *cc; |
7583 | struct bpf_core_ctx ctx = { |
7584 | .btf = btf, |
7585 | .log = log, |
7586 | }; |
7587 | u32 kern_type_id, type_id; |
7588 | int err = 0; |
7589 | |
7590 | /* skip PTR and modifiers */ |
7591 | type_id = t->type; |
7592 | t = btf_type_by_id(btf, t->type); |
7593 | while (btf_type_is_modifier(t)) { |
7594 | type_id = t->type; |
7595 | t = btf_type_by_id(btf, t->type); |
7596 | } |
7597 | |
7598 | mutex_lock(&cand_cache_mutex); |
7599 | cc = bpf_core_find_cands(&ctx, type_id); |
7600 | if (IS_ERR(cc)) { |
7601 | err = PTR_ERR(cc); |
7602 | bpf_log(log, "arg#%d reference type('%s %s') candidate matching error: %d\n", |
7603 | arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off), |
7604 | err); |
7605 | goto cand_cache_unlock; |
7606 | } |
7607 | if (cc->cnt != 1) { |
7608 | bpf_log(log, "arg#%d reference type('%s %s') %s\n", |
7609 | arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off), |
7610 | cc->cnt == 0 ? "has no matches" : "is ambiguous"); |
7611 | err = cc->cnt == 0 ? -ENOENT : -ESRCH; |
7612 | goto cand_cache_unlock; |
7613 | } |
7614 | if (btf_is_module(cc->cands[0].btf)) { |
7615 | bpf_log(log, "arg#%d reference type('%s %s') points to kernel module type (unsupported)\n", |
7616 | arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off)); |
7617 | err = -EOPNOTSUPP; |
7618 | goto cand_cache_unlock; |
7619 | } |
7620 | kern_type_id = cc->cands[0].id; |
7621 | |
7622 | cand_cache_unlock: |
7623 | mutex_unlock(&cand_cache_mutex); |
7624 | if (err) |
7625 | return err; |
7626 | |
7627 | return kern_type_id; |
7628 | } |
7629 | |
7630 | enum btf_arg_tag { |
7631 | ARG_TAG_CTX = BIT_ULL(0), |
7632 | ARG_TAG_NONNULL = BIT_ULL(1), |
7633 | ARG_TAG_TRUSTED = BIT_ULL(2), |
7634 | ARG_TAG_NULLABLE = BIT_ULL(3), |
7635 | ARG_TAG_ARENA = BIT_ULL(4), |
7636 | }; |
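/* Illustrative sketch: these tags come from BTF decl_tags of the form
 * "arg:<name>" on global subprog parameters; libbpf emits them for its
 * __arg_*() macros (assuming a libbpf version that provides them):
 *
 *	__noinline int subprog(struct task_struct *t __arg_trusted,
 *			       void *mem __arg_nonnull)
 *	{
 *		return t->pid;
 *	}
 */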
7637 | |
7638 | /* Process BTF of a function to produce high-level expectation of function |
7639 | * arguments (like ARG_PTR_TO_CTX, or ARG_PTR_TO_MEM, etc). This information |
7640 | * is cached in subprog info for reuse. |
7641 | * Returns: |
7642 | * EFAULT - there is a verifier bug. Abort verification. |
7643 | * EINVAL - cannot convert BTF. |
7644 | * 0 - Successfully processed BTF and constructed argument expectations. |
7645 | */ |
7646 | int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog) |
7647 | { |
7648 | bool is_global = subprog_aux(env, subprog)->linkage == BTF_FUNC_GLOBAL; |
7649 | struct bpf_subprog_info *sub = subprog_info(env, subprog); |
7650 | struct bpf_verifier_log *log = &env->log; |
7651 | struct bpf_prog *prog = env->prog; |
7652 | enum bpf_prog_type prog_type = prog->type; |
7653 | struct btf *btf = prog->aux->btf; |
7654 | const struct btf_param *args; |
7655 | const struct btf_type *t, *ref_t, *fn_t; |
7656 | u32 i, nargs, btf_id; |
7657 | const char *tname; |
7658 | |
7659 | if (sub->args_cached) |
7660 | return 0; |
7661 | |
7662 | if (!prog->aux->func_info) { |
7663 | verifier_bug(env, "func_info undefined"); |
7664 | return -EFAULT; |
7665 | } |
7666 | |
7667 | btf_id = prog->aux->func_info[subprog].type_id; |
7668 | if (!btf_id) { |
7669 | if (!is_global) /* not fatal for static funcs */ |
7670 | return -EINVAL; |
7671 | bpf_log(log, fmt: "Global functions need valid BTF\n"); |
7672 | return -EFAULT; |
7673 | } |
7674 | |
7675 | fn_t = btf_type_by_id(btf, btf_id); |
7676 | if (!fn_t || !btf_type_is_func(fn_t)) { |
7677 | /* These checks were already done by the verifier while loading |
7678 | * struct bpf_func_info |
7679 | */ |
7680 | bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n", |
7681 | subprog); |
7682 | return -EFAULT; |
7683 | } |
7684 | tname = btf_name_by_offset(btf, fn_t->name_off); |
7685 | |
7686 | if (prog->aux->func_info_aux[subprog].unreliable) { |
7687 | verifier_bug(env, "unreliable BTF for function %s()", tname); |
7688 | return -EFAULT; |
7689 | } |
7690 | if (prog_type == BPF_PROG_TYPE_EXT) |
7691 | prog_type = prog->aux->dst_prog->type; |
7692 | |
7693 | t = btf_type_by_id(btf, fn_t->type); |
7694 | if (!t || !btf_type_is_func_proto(t)) { |
7695 | bpf_log(log, fmt: "Invalid type of function %s()\n", tname); |
7696 | return -EFAULT; |
7697 | } |
7698 | args = (const struct btf_param *)(t + 1); |
7699 | nargs = btf_type_vlen(t); |
7700 | if (nargs > MAX_BPF_FUNC_REG_ARGS) { |
7701 | if (!is_global) |
7702 | return -EINVAL; |
7703 | bpf_log(log, fmt: "Global function %s() with %d > %d args. Buggy compiler.\n", |
7704 | tname, nargs, MAX_BPF_FUNC_REG_ARGS); |
7705 | return -EINVAL; |
7706 | } |
7707 | /* check that function returns int, exception cb also requires this */ |
7708 | t = btf_type_by_id(btf, t->type); |
7709 | while (btf_type_is_modifier(t)) |
7710 | t = btf_type_by_id(btf, t->type); |
7711 | if (!btf_type_is_int(t) && !btf_is_any_enum(t)) { |
7712 | if (!is_global) |
7713 | return -EINVAL; |
7714 | bpf_log(log, |
7715 | fmt: "Global function %s() doesn't return scalar. Only those are supported.\n", |
7716 | tname); |
7717 | return -EINVAL; |
7718 | } |
7719 | /* Convert BTF function arguments into verifier types. |
7720 | * Only PTR_TO_CTX and SCALAR are supported atm. |
7721 | */ |
7722 | for (i = 0; i < nargs; i++) { |
7723 | u32 tags = 0; |
7724 | int id = 0; |
7725 | |
7726 | /* 'arg:<tag>' decl_tag takes precedence over derivation of |
7727 | * register type from BTF type itself |
7728 | */ |
7729 | while ((id = btf_find_next_decl_tag(btf, fn_t, i, "arg:", id)) > 0) { |
7730 | const struct btf_type *tag_t = btf_type_by_id(btf, id); |
7731 | const char *tag = __btf_name_by_offset(btf, tag_t->name_off) + 4; |
7732 | |
7733 | /* disallow arg tags in static subprogs */ |
7734 | if (!is_global) { |
7735 | bpf_log(log, fmt: "arg#%d type tag is not supported in static functions\n", i); |
7736 | return -EOPNOTSUPP; |
7737 | } |
7738 | |
7739 | if (strcmp(tag, "ctx") == 0) { |
7740 | tags |= ARG_TAG_CTX; |
7741 | } else if (strcmp(tag, "trusted") == 0) { |
7742 | tags |= ARG_TAG_TRUSTED; |
7743 | } else if (strcmp(tag, "nonnull") == 0) { |
7744 | tags |= ARG_TAG_NONNULL; |
7745 | } else if (strcmp(tag, "nullable") == 0) { |
7746 | tags |= ARG_TAG_NULLABLE; |
7747 | } else if (strcmp(tag, "arena") == 0) { |
7748 | tags |= ARG_TAG_ARENA; |
7749 | } else { |
7750 | bpf_log(log, fmt: "arg#%d has unsupported set of tags\n", i); |
7751 | return -EOPNOTSUPP; |
7752 | } |
7753 | } |
7754 | if (id != -ENOENT) { |
7755 | bpf_log(log, fmt: "arg#%d type tag fetching failure: %d\n", i, id); |
7756 | return id; |
7757 | } |
7758 | |
7759 | t = btf_type_by_id(btf, args[i].type); |
7760 | while (btf_type_is_modifier(t)) |
7761 | t = btf_type_by_id(btf, t->type); |
7762 | if (!btf_type_is_ptr(t)) |
7763 | goto skip_pointer; |
7764 | |
7765 | if ((tags & ARG_TAG_CTX) || btf_is_prog_ctx_type(log, btf, t, prog_type, i)) { |
7766 | if (tags & ~ARG_TAG_CTX) { |
7767 | bpf_log(log, "arg#%d has invalid combination of tags\n", i); |
7768 | return -EINVAL; |
7769 | } |
7770 | if ((tags & ARG_TAG_CTX) && |
7771 | btf_validate_prog_ctx_type(log, btf, t, i, prog_type, |
7772 | prog->expected_attach_type)) |
7773 | return -EINVAL; |
7774 | sub->args[i].arg_type = ARG_PTR_TO_CTX; |
7775 | continue; |
7776 | } |
7777 | if (btf_is_dynptr_ptr(btf, t)) { |
7778 | if (tags) { |
7779 | bpf_log(log, fmt: "arg#%d has invalid combination of tags\n", i); |
7780 | return -EINVAL; |
7781 | } |
7782 | sub->args[i].arg_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY; |
7783 | continue; |
7784 | } |
7785 | if (tags & ARG_TAG_TRUSTED) { |
7786 | int kern_type_id; |
7787 | |
7788 | if (tags & ARG_TAG_NONNULL) { |
7789 | bpf_log(log, fmt: "arg#%d has invalid combination of tags\n", i); |
7790 | return -EINVAL; |
7791 | } |
7792 | |
7793 | kern_type_id = btf_get_ptr_to_btf_id(log, i, btf, t); |
7794 | if (kern_type_id < 0) |
7795 | return kern_type_id; |
7796 | |
7797 | sub->args[i].arg_type = ARG_PTR_TO_BTF_ID | PTR_TRUSTED; |
7798 | if (tags & ARG_TAG_NULLABLE) |
7799 | sub->args[i].arg_type |= PTR_MAYBE_NULL; |
7800 | sub->args[i].btf_id = kern_type_id; |
7801 | continue; |
7802 | } |
7803 | if (tags & ARG_TAG_ARENA) { |
7804 | if (tags & ~ARG_TAG_ARENA) { |
7805 | bpf_log(log, fmt: "arg#%d arena cannot be combined with any other tags\n", i); |
7806 | return -EINVAL; |
7807 | } |
7808 | sub->args[i].arg_type = ARG_PTR_TO_ARENA; |
7809 | continue; |
7810 | } |
7811 | if (is_global) { /* generic user data pointer */ |
7812 | u32 mem_size; |
7813 | |
7814 | if (tags & ARG_TAG_NULLABLE) { |
7815 | bpf_log(log, fmt: "arg#%d has invalid combination of tags\n", i); |
7816 | return -EINVAL; |
7817 | } |
7818 | |
7819 | t = btf_type_skip_modifiers(btf, t->type, NULL); |
7820 | ref_t = btf_resolve_size(btf, t, &mem_size); |
7821 | if (IS_ERR(ref_t)) { |
7822 | bpf_log(log, "arg#%d reference type('%s %s') size cannot be determined: %ld\n", |
7823 | i, btf_type_str(t), btf_name_by_offset(btf, t->name_off), |
7824 | PTR_ERR(ref_t)); |
7825 | return -EINVAL; |
7826 | } |
7827 | |
7828 | sub->args[i].arg_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL; |
7829 | if (tags & ARG_TAG_NONNULL) |
7830 | sub->args[i].arg_type &= ~PTR_MAYBE_NULL; |
7831 | sub->args[i].mem_size = mem_size; |
7832 | continue; |
7833 | } |
7834 | |
7835 | skip_pointer: |
7836 | if (tags) { |
7837 | bpf_log(log, fmt: "arg#%d has pointer tag, but is not a pointer type\n", i); |
7838 | return -EINVAL; |
7839 | } |
7840 | if (btf_type_is_int(t) || btf_is_any_enum(t)) { |
7841 | sub->args[i].arg_type = ARG_ANYTHING; |
7842 | continue; |
7843 | } |
7844 | if (!is_global) |
7845 | return -EINVAL; |
7846 | bpf_log(log, fmt: "Arg#%d type %s in %s() is not supported yet.\n", |
7847 | i, btf_type_str(t), tname); |
7848 | return -EINVAL; |
7849 | } |
7850 | |
7851 | sub->arg_cnt = nargs; |
7852 | sub->args_cached = true; |
7853 | |
7854 | return 0; |
7855 | } |
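/* Illustrative sketch of the derivation above: for an untagged global
 * subprog
 *
 *	__noinline int sub(struct __sk_buff *skb, int n, int *p);
 *
 * arg0 becomes ARG_PTR_TO_CTX (a recognized program context type for
 * e.g. a TC program), arg1 ARG_ANYTHING, and arg2 becomes
 * ARG_PTR_TO_MEM | PTR_MAYBE_NULL with mem_size = sizeof(int), so the
 * caller must prove p is either NULL-checked or a valid 4-byte region.
 */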
7856 | |
7857 | static void btf_type_show(const struct btf *btf, u32 type_id, void *obj, |
7858 | struct btf_show *show) |
7859 | { |
7860 | const struct btf_type *t = btf_type_by_id(btf, type_id); |
7861 | |
7862 | show->btf = btf; |
7863 | memset(&show->state, 0, sizeof(show->state)); |
7864 | memset(&show->obj, 0, sizeof(show->obj)); |
7865 | |
7866 | btf_type_ops(t)->show(btf, t, type_id, obj, 0, show); |
7867 | } |
7868 | |
7869 | __printf(2, 0) static void btf_seq_show(struct btf_show *show, const char *fmt, |
7870 | va_list args) |
7871 | { |
7872 | seq_vprintf((struct seq_file *)show->target, fmt, args); |
7873 | } |
7874 | |
7875 | int btf_type_seq_show_flags(const struct btf *btf, u32 type_id, |
7876 | void *obj, struct seq_file *m, u64 flags) |
7877 | { |
7878 | struct btf_show sseq; |
7879 | |
7880 | sseq.target = m; |
7881 | sseq.showfn = btf_seq_show; |
7882 | sseq.flags = flags; |
7883 | |
7884 | btf_type_show(btf, type_id, obj, &sseq); |
7885 | |
7886 | return sseq.state.status; |
7887 | } |
7888 | |
7889 | void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj, |
7890 | struct seq_file *m) |
7891 | { |
7892 | (void) btf_type_seq_show_flags(btf, type_id, obj, m, |
7893 | BTF_SHOW_NONAME | BTF_SHOW_COMPACT | |
7894 | BTF_SHOW_ZERO | BTF_SHOW_UNSAFE); |
7895 | } |
7896 | |
7897 | struct btf_show_snprintf { |
7898 | struct btf_show show; |
7899 | int len_left; /* space left in string */ |
7900 | int len; /* length we would have written */ |
7901 | }; |
7902 | |
7903 | __printf(2, 0) static void btf_snprintf_show(struct btf_show *show, const char *fmt, |
7904 | va_list args) |
7905 | { |
7906 | struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show; |
7907 | int len; |
7908 | |
7909 | len = vsnprintf(show->target, ssnprintf->len_left, fmt, args); |
7910 | |
7911 | if (len < 0) { |
7912 | ssnprintf->len_left = 0; |
7913 | ssnprintf->len = len; |
7914 | } else if (len >= ssnprintf->len_left) { |
7915 | /* no space, drive on to get length we would have written */ |
7916 | ssnprintf->len_left = 0; |
7917 | ssnprintf->len += len; |
7918 | } else { |
7919 | ssnprintf->len_left -= len; |
7920 | ssnprintf->len += len; |
7921 | show->target += len; |
7922 | } |
7923 | } |
7924 | |
7925 | int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj, |
7926 | char *buf, int len, u64 flags) |
7927 | { |
7928 | struct btf_show_snprintf ssnprintf; |
7929 | |
7930 | ssnprintf.show.target = buf; |
7931 | ssnprintf.show.flags = flags; |
7932 | ssnprintf.show.showfn = btf_snprintf_show; |
7933 | ssnprintf.len_left = len; |
7934 | ssnprintf.len = 0; |
7935 | |
7936 | btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf); |
7937 | |
7938 | /* If we encountered an error, return it. */ |
7939 | if (ssnprintf.show.state.status) |
7940 | return ssnprintf.show.state.status; |
7941 | |
7942 | /* Otherwise return length we would have written */ |
7943 | return ssnprintf.len; |
7944 | } |
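/* Illustrative usage sketch (hypothetical caller): like snprintf(),
 * the return value is the length that would have been emitted, so
 * truncation is detectable:
 *
 *	char buf[64];
 *	int len = btf_type_snprintf_show(btf, type_id, obj,
 *					 buf, sizeof(buf), 0);
 *	if (len >= (int)sizeof(buf))
 *		; // truncated: len + 1 bytes were needed
 */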
7945 | |
7946 | #ifdef CONFIG_PROC_FS |
7947 | static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp) |
7948 | { |
7949 | const struct btf *btf = filp->private_data; |
7950 | |
7951 | seq_printf(m, "btf_id:\t%u\n", btf->id); |
7952 | } |
7953 | #endif |
7954 | |
7955 | static int btf_release(struct inode *inode, struct file *filp) |
7956 | { |
7957 | btf_put(filp->private_data); |
7958 | return 0; |
7959 | } |
7960 | |
7961 | const struct file_operations btf_fops = { |
7962 | #ifdef CONFIG_PROC_FS |
7963 | .show_fdinfo = bpf_btf_show_fdinfo, |
7964 | #endif |
7965 | .release = btf_release, |
7966 | }; |
7967 | |
7968 | static int __btf_new_fd(struct btf *btf) |
7969 | { |
7970 | return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC); |
7971 | } |
7972 | |
7973 | int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) |
7974 | { |
7975 | struct btf *btf; |
7976 | int ret; |
7977 | |
7978 | btf = btf_parse(attr, uattr, uattr_size); |
7979 | if (IS_ERR(btf)) |
7980 | return PTR_ERR(btf); |
7981 | |
7982 | ret = btf_alloc_id(btf); |
7983 | if (ret) { |
7984 | btf_free(btf); |
7985 | return ret; |
7986 | } |
7987 | |
7988 | /* |
7989 | * The BTF ID is published to the userspace. |
7990 | * All BTF free must go through call_rcu() from |
7991 | * now on (i.e. free by calling btf_put()). |
7992 | */ |
7993 | |
7994 | ret = __btf_new_fd(btf); |
7995 | if (ret < 0) |
7996 | btf_put(btf); |
7997 | |
7998 | return ret; |
7999 | } |
8000 | |
8001 | struct btf *btf_get_by_fd(int fd) |
8002 | { |
8003 | struct btf *btf; |
8004 | CLASS(fd, f)(fd); |
8005 | |
8006 | btf = __btf_get_by_fd(f); |
8007 | if (!IS_ERR(btf)) |
8008 | refcount_inc(&btf->refcnt); |
8009 | |
8010 | return btf; |
8011 | } |
8012 | |
8013 | int btf_get_info_by_fd(const struct btf *btf, |
8014 | const union bpf_attr *attr, |
8015 | union bpf_attr __user *uattr) |
8016 | { |
8017 | struct bpf_btf_info __user *uinfo; |
8018 | struct bpf_btf_info info; |
8019 | u32 info_copy, btf_copy; |
8020 | void __user *ubtf; |
8021 | char __user *uname; |
8022 | u32 uinfo_len, uname_len, name_len; |
8023 | int ret = 0; |
8024 | |
8025 | uinfo = u64_to_user_ptr(attr->info.info); |
8026 | uinfo_len = attr->info.info_len; |
8027 | |
8028 | info_copy = min_t(u32, uinfo_len, sizeof(info)); |
8029 | memset(&info, 0, sizeof(info)); |
8030 | if (copy_from_user(&info, uinfo, info_copy)) |
8031 | return -EFAULT; |
8032 | |
8033 | info.id = btf->id; |
8034 | ubtf = u64_to_user_ptr(info.btf); |
8035 | btf_copy = min_t(u32, btf->data_size, info.btf_size); |
8036 | if (copy_to_user(ubtf, btf->data, btf_copy)) |
8037 | return -EFAULT; |
8038 | info.btf_size = btf->data_size; |
8039 | |
8040 | info.kernel_btf = btf->kernel_btf; |
8041 | |
8042 | uname = u64_to_user_ptr(info.name); |
8043 | uname_len = info.name_len; |
8044 | if (!uname ^ !uname_len) |
8045 | return -EINVAL; |
8046 | |
8047 | name_len = strlen(btf->name); |
8048 | info.name_len = name_len; |
8049 | |
8050 | if (uname) { |
8051 | if (uname_len >= name_len + 1) { |
8052 | if (copy_to_user(uname, btf->name, name_len + 1)) |
8053 | return -EFAULT; |
8054 | } else { |
8055 | char zero = '\0'; |
8056 | |
8057 | if (copy_to_user(uname, btf->name, uname_len - 1)) |
8058 | return -EFAULT; |
8059 | if (put_user(zero, uname + uname_len - 1)) |
8060 | return -EFAULT; |
8061 | /* let user-space know about too short buffer */ |
8062 | ret = -ENOSPC; |
8063 | } |
8064 | } |
8065 | |
8066 | if (copy_to_user(uinfo, &info, info_copy) || |
8067 | put_user(info_copy, &uattr->info.info_len)) |
8068 | return -EFAULT; |
8069 | |
8070 | return ret; |
8071 | } |
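/* Illustrative userspace sketch (assuming libbpf's
 * bpf_btf_get_info_by_fd() wrapper): the usual pattern is two calls,
 * the first with btf_size == 0 to learn the required buffer size:
 *
 *	struct bpf_btf_info info = {};
 *	__u32 len = sizeof(info);
 *
 *	bpf_btf_get_info_by_fd(btf_fd, &info, &len); // sets info.btf_size
 *	info.btf = (__u64)(uintptr_t)malloc(info.btf_size);
 *	bpf_btf_get_info_by_fd(btf_fd, &info, &len); // copies raw BTF
 */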
8072 | |
8073 | int btf_get_fd_by_id(u32 id) |
8074 | { |
8075 | struct btf *btf; |
8076 | int fd; |
8077 | |
8078 | rcu_read_lock(); |
8079 | btf = idr_find(&btf_idr, id); |
8080 | if (!btf || !refcount_inc_not_zero(&btf->refcnt)) |
8081 | btf = ERR_PTR(-ENOENT); |
8082 | rcu_read_unlock(); |
8083 | |
8084 | if (IS_ERR(btf)) |
8085 | return PTR_ERR(btf); |
8086 | |
8087 | fd = __btf_new_fd(btf); |
8088 | if (fd < 0) |
8089 | btf_put(btf); |
8090 | |
8091 | return fd; |
8092 | } |
8093 | |
8094 | u32 btf_obj_id(const struct btf *btf) |
8095 | { |
8096 | return btf->id; |
8097 | } |
8098 | |
8099 | bool btf_is_kernel(const struct btf *btf) |
8100 | { |
8101 | return btf->kernel_btf; |
8102 | } |
8103 | |
8104 | bool btf_is_module(const struct btf *btf) |
8105 | { |
8106 | return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0; |
8107 | } |
8108 | |
8109 | enum { |
8110 | BTF_MODULE_F_LIVE = (1 << 0), |
8111 | }; |
8112 | |
8113 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
8114 | struct btf_module { |
8115 | struct list_head list; |
8116 | struct module *module; |
8117 | struct btf *btf; |
8118 | struct bin_attribute *sysfs_attr; |
8119 | int flags; |
8120 | }; |
8121 | |
8122 | static LIST_HEAD(btf_modules); |
8123 | static DEFINE_MUTEX(btf_module_mutex); |
8124 | |
8125 | static void purge_cand_cache(struct btf *btf); |
8126 | |
8127 | static int btf_module_notify(struct notifier_block *nb, unsigned long op, |
8128 | void *module) |
8129 | { |
8130 | struct btf_module *btf_mod, *tmp; |
8131 | struct module *mod = module; |
8132 | struct btf *btf; |
8133 | int err = 0; |
8134 | |
8135 | if (mod->btf_data_size == 0 || |
8136 | (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE && |
8137 | op != MODULE_STATE_GOING)) |
8138 | goto out; |
8139 | |
8140 | switch (op) { |
8141 | case MODULE_STATE_COMING: |
8142 | btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL); |
8143 | if (!btf_mod) { |
8144 | err = -ENOMEM; |
8145 | goto out; |
8146 | } |
8147 | btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size, |
8148 | mod->btf_base_data, mod->btf_base_data_size); |
8149 | if (IS_ERR(btf)) { |
8150 | kfree(btf_mod); |
8151 | if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) { |
8152 | pr_warn("failed to validate module [%s] BTF: %ld\n", |
8153 | mod->name, PTR_ERR(btf)); |
8154 | err = PTR_ERR(btf); |
8155 | } else { |
8156 | pr_warn_once("Kernel module BTF mismatch detected, BTF debug info may be unavailable for some modules\n"); |
8157 | } |
8158 | goto out; |
8159 | } |
8160 | err = btf_alloc_id(btf); |
8161 | if (err) { |
8162 | btf_free(btf); |
8163 | kfree(btf_mod); |
8164 | goto out; |
8165 | } |
8166 | |
8167 | purge_cand_cache(NULL); |
8168 | mutex_lock(&btf_module_mutex); |
8169 | btf_mod->module = module; |
8170 | btf_mod->btf = btf; |
8171 | list_add(&btf_mod->list, &btf_modules); |
8172 | mutex_unlock(&btf_module_mutex); |
8173 | |
8174 | if (IS_ENABLED(CONFIG_SYSFS)) { |
8175 | struct bin_attribute *attr; |
8176 | |
8177 | attr = kzalloc(sizeof(*attr), GFP_KERNEL); |
8178 | if (!attr) |
8179 | goto out; |
8180 | |
8181 | sysfs_bin_attr_init(attr); |
8182 | attr->attr.name = btf->name; |
8183 | attr->attr.mode = 0444; |
8184 | attr->size = btf->data_size; |
8185 | attr->private = btf->data; |
8186 | attr->read_new = sysfs_bin_attr_simple_read; |
8187 | |
8188 | err = sysfs_create_bin_file(btf_kobj, attr); |
8189 | if (err) { |
8190 | pr_warn("failed to register module [%s] BTF in sysfs: %d\n", |
8191 | mod->name, err); |
8192 | kfree(attr); |
8193 | err = 0; |
8194 | goto out; |
8195 | } |
8196 | |
8197 | btf_mod->sysfs_attr = attr; |
8198 | } |
8199 | |
8200 | break; |
8201 | case MODULE_STATE_LIVE: |
8202 | mutex_lock(&btf_module_mutex); |
8203 | list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { |
8204 | if (btf_mod->module != module) |
8205 | continue; |
8206 | |
8207 | btf_mod->flags |= BTF_MODULE_F_LIVE; |
8208 | break; |
8209 | } |
8210 | mutex_unlock(&btf_module_mutex); |
8211 | break; |
8212 | case MODULE_STATE_GOING: |
8213 | mutex_lock(&btf_module_mutex); |
8214 | list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { |
8215 | if (btf_mod->module != module) |
8216 | continue; |
8217 | |
8218 | list_del(&btf_mod->list); |
8219 | if (btf_mod->sysfs_attr) |
8220 | sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr); |
8221 | purge_cand_cache(btf_mod->btf); |
8222 | btf_put(btf_mod->btf); |
8223 | kfree(btf_mod->sysfs_attr); |
8224 | kfree(btf_mod); |
8225 | break; |
8226 | } |
8227 | mutex_unlock(&btf_module_mutex); |
8228 | break; |
8229 | } |
8230 | out: |
8231 | return notifier_from_errno(err); |
8232 | } |
8233 | |
8234 | static struct notifier_block btf_module_nb = { |
8235 | .notifier_call = btf_module_notify, |
8236 | }; |
8237 | |
8238 | static int __init btf_module_init(void) |
8239 | { |
8240 | register_module_notifier(&btf_module_nb); |
8241 | return 0; |
8242 | } |
8243 | |
8244 | fs_initcall(btf_module_init); |
8245 | #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */ |
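/* Illustrative userspace sketch (assuming libbpf; "ip_tables" is just
 * an example module name): the sysfs file registered above is what
 * lets tools read module BTF back, split on top of vmlinux BTF:
 *
 *	struct btf *base = btf__load_vmlinux_btf();
 *	struct btf *mod  = btf__parse_split("/sys/kernel/btf/ip_tables",
 *					    base);
 */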
8246 | |
8247 | struct module *btf_try_get_module(const struct btf *btf) |
8248 | { |
8249 | struct module *res = NULL; |
8250 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
8251 | struct btf_module *btf_mod, *tmp; |
8252 | |
8253 | mutex_lock(&btf_module_mutex); |
8254 | list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { |
8255 | if (btf_mod->btf != btf) |
8256 | continue; |
8257 | |
8258 | /* We must only consider module whose __init routine has |
8259 | * finished, hence we must check for BTF_MODULE_F_LIVE flag, |
8260 | * which is set from the notifier callback for |
8261 | * MODULE_STATE_LIVE. |
8262 | */ |
8263 | if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module)) |
8264 | res = btf_mod->module; |
8265 | |
8266 | break; |
8267 | } |
8268 | mutex_unlock(&btf_module_mutex); |
8269 | #endif |
8270 | |
8271 | return res; |
8272 | } |
8273 | |
8274 | /* Returns struct btf corresponding to the struct module. |
8275 | * This function can return NULL or ERR_PTR. |
8276 | */ |
8277 | static struct btf *btf_get_module_btf(const struct module *module) |
8278 | { |
8279 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
8280 | struct btf_module *btf_mod, *tmp; |
8281 | #endif |
8282 | struct btf *btf = NULL; |
8283 | |
8284 | if (!module) { |
8285 | btf = bpf_get_btf_vmlinux(); |
8286 | if (!IS_ERR_OR_NULL(btf)) |
8287 | btf_get(btf); |
8288 | return btf; |
8289 | } |
8290 | |
8291 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
8292 | mutex_lock(&btf_module_mutex); |
8293 | list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { |
8294 | if (btf_mod->module != module) |
8295 | continue; |
8296 | |
8297 | btf_get(btf_mod->btf); |
8298 | btf = btf_mod->btf; |
8299 | break; |
8300 | } |
8301 | mutex_unlock(&btf_module_mutex); |
8302 | #endif |
8303 | |
8304 | return btf; |
8305 | } |
8306 | |
8307 | static int check_btf_kconfigs(const struct module *module, const char *feature) |
8308 | { |
8309 | if (!module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { |
8310 | pr_err("missing vmlinux BTF, cannot register %s\n", feature); |
8311 | return -ENOENT; |
8312 | } |
8313 | if (module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) |
8314 | pr_warn("missing module BTF, cannot register %s\n", feature); |
8315 | return 0; |
8316 | } |
8317 | |
8318 | BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags) |
8319 | { |
8320 | struct btf *btf = NULL; |
8321 | int btf_obj_fd = 0; |
8322 | long ret; |
8323 | |
8324 | if (flags) |
8325 | return -EINVAL; |
8326 | |
8327 | if (name_sz <= 1 || name[name_sz - 1]) |
8328 | return -EINVAL; |
8329 | |
8330 | ret = bpf_find_btf_id(name, kind, &btf); |
8331 | if (ret > 0 && btf_is_module(btf)) { |
8332 | btf_obj_fd = __btf_new_fd(btf); |
8333 | if (btf_obj_fd < 0) { |
8334 | btf_put(btf); |
8335 | return btf_obj_fd; |
8336 | } |
8337 | return ret | (((u64)btf_obj_fd) << 32); |
8338 | } |
8339 | if (ret > 0) |
8340 | btf_put(btf); |
8341 | return ret; |
8342 | } |
8343 | |
8344 | const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = { |
8345 | .func = bpf_btf_find_by_name_kind, |
8346 | .gpl_only = false, |
8347 | .ret_type = RET_INTEGER, |
8348 | .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
8349 | .arg2_type = ARG_CONST_SIZE, |
8350 | .arg3_type = ARG_ANYTHING, |
8351 | .arg4_type = ARG_ANYTHING, |
8352 | }; |
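/* Illustrative BPF-side sketch (callable from e.g. BPF_PROG_TYPE_SYSCALL
 * programs): a positive return value packs a module BTF FD, if any, in
 * the upper 32 bits and the type ID in the lower 32 bits:
 *
 *	char name[] = "task_struct";
 *	long res = bpf_btf_find_by_name_kind(name, sizeof(name),
 *					     BTF_KIND_STRUCT, 0);
 *	if (res > 0) {
 *		int btf_obj_fd = res >> 32;
 *		__u32 btf_id = (__u32)res;
 *	}
 */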
8353 | |
8354 | BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE) |
8355 | #define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type) |
8356 | BTF_TRACING_TYPE_xxx |
8357 | #undef BTF_TRACING_TYPE |
8358 | |
8359 | /* Validate well-formedness of iter argument type. |
8360 | * On success, return positive BTF ID of iter state's STRUCT type. |
8361 | * On error, negative error is returned. |
8362 | */ |
8363 | int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx) |
8364 | { |
8365 | const struct btf_param *arg; |
8366 | const struct btf_type *t; |
8367 | const char *name; |
8368 | int btf_id; |
8369 | |
8370 | if (btf_type_vlen(t: func) <= arg_idx) |
8371 | return -EINVAL; |
8372 | |
8373 | arg = &btf_params(t: func)[arg_idx]; |
8374 | t = btf_type_skip_modifiers(btf, id: arg->type, NULL); |
8375 | if (!t || !btf_type_is_ptr(t)) |
8376 | return -EINVAL; |
8377 | 	t = btf_type_skip_modifiers(btf, t->type, &btf_id);
8378 | if (!t || !__btf_type_is_struct(t)) |
8379 | return -EINVAL; |
8380 | |
8381 | 	name = btf_name_by_offset(btf, t->name_off);
8382 | if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1)) |
8383 | return -EINVAL; |
8384 | |
8385 | return btf_id; |
8386 | } |
8387 | |
8388 | static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name, |
8389 | const struct btf_type *func, u32 func_flags) |
8390 | { |
8391 | u32 flags = func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY); |
8392 | const char *sfx, *iter_name; |
8393 | const struct btf_type *t; |
8394 | char exp_name[128]; |
8395 | u32 nr_args; |
8396 | int btf_id; |
8397 | |
8398 | /* exactly one of KF_ITER_{NEW,NEXT,DESTROY} can be set */ |
8399 | if (!flags || (flags & (flags - 1))) |
8400 | return -EINVAL; |
8401 | |
8402 | /* any BPF iter kfunc should have `struct bpf_iter_<type> *` first arg */ |
8403 | 	nr_args = btf_type_vlen(func);
8404 | if (nr_args < 1) |
8405 | return -EINVAL; |
8406 | |
8407 | 	btf_id = btf_check_iter_arg(btf, func, 0);
8408 | if (btf_id < 0) |
8409 | return btf_id; |
8410 | |
8411 | /* sizeof(struct bpf_iter_<type>) should be a multiple of 8 to |
8412 | * fit nicely in stack slots |
8413 | */ |
8414 | t = btf_type_by_id(btf, btf_id); |
8415 | if (t->size == 0 || (t->size % 8)) |
8416 | return -EINVAL; |
8417 | |
8418 | /* validate bpf_iter_<type>_{new,next,destroy}(struct bpf_iter_<type> *) |
8419 | * naming pattern |
8420 | */ |
8421 | 	iter_name = btf_name_by_offset(btf, t->name_off) + sizeof(ITER_PREFIX) - 1;
8422 | if (flags & KF_ITER_NEW) |
8423 | sfx = "new"; |
8424 | else if (flags & KF_ITER_NEXT) |
8425 | sfx = "next"; |
8426 | else /* (flags & KF_ITER_DESTROY) */ |
8427 | sfx = "destroy"; |
8428 | |
8429 | 	snprintf(exp_name, sizeof(exp_name), "bpf_iter_%s_%s", iter_name, sfx);
8430 | if (strcmp(func_name, exp_name)) |
8431 | return -EINVAL; |
8432 | |
8433 | /* only iter constructor should have extra arguments */ |
8434 | if (!(flags & KF_ITER_NEW) && nr_args != 1) |
8435 | return -EINVAL; |
8436 | |
8437 | if (flags & KF_ITER_NEXT) { |
8438 | /* bpf_iter_<type>_next() should return pointer */ |
8439 | 		t = btf_type_skip_modifiers(btf, func->type, NULL);
8440 | if (!t || !btf_type_is_ptr(t)) |
8441 | return -EINVAL; |
8442 | } |
8443 | |
8444 | if (flags & KF_ITER_DESTROY) { |
8445 | /* bpf_iter_<type>_destroy() should return void */ |
8446 | t = btf_type_by_id(btf, func->type); |
8447 | if (!t || !btf_type_is_void(t)) |
8448 | return -EINVAL; |
8449 | } |
8450 | |
8451 | return 0; |
8452 | } |
8453 | |
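/* Example (sketch): an iterator kfunc triple that passes the checks in
 * btf_check_iter_kfuncs(), modeled on the in-tree numbers iterator
 * (kernel/bpf/bpf_iter.c). The state struct is opaque, its size is a
 * multiple of 8, all three kfuncs take it as the first argument, and
 * only the constructor takes extra arguments.
 */
struct bpf_iter_num {
	__u64 __opaque[1];
} __attribute__((aligned(8)));

__bpf_kfunc int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end);
__bpf_kfunc int *bpf_iter_num_next(struct bpf_iter_num *it);	/* NEXT returns a pointer */
__bpf_kfunc void bpf_iter_num_destroy(struct bpf_iter_num *it);	/* DESTROY returns void */
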
8454 | static int btf_check_kfunc_protos(struct btf *btf, u32 func_id, u32 func_flags) |
8455 | { |
8456 | const struct btf_type *func; |
8457 | const char *func_name; |
8458 | int err; |
8459 | |
8460 | /* any kfunc should be FUNC -> FUNC_PROTO */ |
8461 | func = btf_type_by_id(btf, func_id); |
8462 | 	if (!func || !btf_type_is_func(func))
8463 | return -EINVAL; |
8464 | |
8465 | /* sanity check kfunc name */ |
8466 | 	func_name = btf_name_by_offset(btf, func->name_off);
8467 | if (!func_name || !func_name[0]) |
8468 | return -EINVAL; |
8469 | |
8470 | func = btf_type_by_id(btf, func->type); |
8471 | 	if (!func || !btf_type_is_func_proto(func))
8472 | return -EINVAL; |
8473 | |
8474 | if (func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY)) { |
8475 | err = btf_check_iter_kfuncs(btf, func_name, func, func_flags); |
8476 | if (err) |
8477 | return err; |
8478 | } |
8479 | |
8480 | return 0; |
8481 | } |
8482 | |
8483 | /* Kernel Function (kfunc) BTF ID set registration API */ |
8484 | |
8485 | static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, |
8486 | const struct btf_kfunc_id_set *kset) |
8487 | { |
8488 | struct btf_kfunc_hook_filter *hook_filter; |
8489 | struct btf_id_set8 *add_set = kset->set; |
8490 | bool vmlinux_set = !btf_is_module(btf); |
8491 | bool add_filter = !!kset->filter; |
8492 | struct btf_kfunc_set_tab *tab; |
8493 | struct btf_id_set8 *set; |
8494 | u32 set_cnt, i; |
8495 | int ret; |
8496 | |
8497 | if (hook >= BTF_KFUNC_HOOK_MAX) { |
8498 | ret = -EINVAL; |
8499 | goto end; |
8500 | } |
8501 | |
8502 | if (!add_set->cnt) |
8503 | return 0; |
8504 | |
8505 | tab = btf->kfunc_set_tab; |
8506 | |
8507 | if (tab && add_filter) { |
8508 | u32 i; |
8509 | |
8510 | hook_filter = &tab->hook_filters[hook]; |
8511 | for (i = 0; i < hook_filter->nr_filters; i++) { |
8512 | if (hook_filter->filters[i] == kset->filter) { |
8513 | add_filter = false; |
8514 | break; |
8515 | } |
8516 | } |
8517 | |
8518 | if (add_filter && hook_filter->nr_filters == BTF_KFUNC_FILTER_MAX_CNT) { |
8519 | ret = -E2BIG; |
8520 | goto end; |
8521 | } |
8522 | } |
8523 | |
8524 | if (!tab) { |
8525 | tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN); |
8526 | if (!tab) |
8527 | return -ENOMEM; |
8528 | btf->kfunc_set_tab = tab; |
8529 | } |
8530 | |
8531 | set = tab->sets[hook]; |
8532 | /* Warn when register_btf_kfunc_id_set is called twice for the same hook |
8533 | * for module sets. |
8534 | */ |
8535 | if (WARN_ON_ONCE(set && !vmlinux_set)) { |
8536 | ret = -EINVAL; |
8537 | goto end; |
8538 | } |
8539 | |
8540 | /* In case of vmlinux sets, there may be more than one set being |
8541 | * registered per hook. To create a unified set, we allocate a new set |
8542 | * and concatenate all individual sets being registered. While each set |
8543 | * is individually sorted, they may become unsorted when concatenated, |
8544 | * hence re-sorting the final set again is required to make binary |
8545 | * searching the set using btf_id_set8_contains function work. |
8546 | * |
8547 | * For module sets, we need to allocate as we may need to relocate |
8548 | * BTF ids. |
8549 | */ |
8550 | set_cnt = set ? set->cnt : 0; |
8551 | |
8552 | if (set_cnt > U32_MAX - add_set->cnt) { |
8553 | ret = -EOVERFLOW; |
8554 | goto end; |
8555 | } |
8556 | |
8557 | if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) { |
8558 | ret = -E2BIG; |
8559 | goto end; |
8560 | } |
8561 | |
8562 | /* Grow set */ |
8563 | set = krealloc(tab->sets[hook], |
8564 | struct_size(set, pairs, set_cnt + add_set->cnt), |
8565 | GFP_KERNEL | __GFP_NOWARN); |
8566 | if (!set) { |
8567 | ret = -ENOMEM; |
8568 | goto end; |
8569 | } |
8570 | |
8571 | /* For newly allocated set, initialize set->cnt to 0 */ |
8572 | if (!tab->sets[hook]) |
8573 | set->cnt = 0; |
8574 | tab->sets[hook] = set; |
8575 | |
8576 | /* Concatenate the two sets */ |
8577 | memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0])); |
8578 | /* Now that the set is copied, update with relocated BTF ids */ |
8579 | for (i = set->cnt; i < set->cnt + add_set->cnt; i++) |
8580 | 		set->pairs[i].id = btf_relocate_id(btf, set->pairs[i].id);
8581 | |
8582 | set->cnt += add_set->cnt; |
8583 | |
8584 | 	sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL);
8585 | |
8586 | if (add_filter) { |
8587 | hook_filter = &tab->hook_filters[hook]; |
8588 | hook_filter->filters[hook_filter->nr_filters++] = kset->filter; |
8589 | } |
8590 | return 0; |
8591 | end: |
8592 | btf_free_kfunc_set_tab(btf); |
8593 | return ret; |
8594 | } |
8595 | |
8596 | static u32 *__btf_kfunc_id_set_contains(const struct btf *btf, |
8597 | enum btf_kfunc_hook hook, |
8598 | u32 kfunc_btf_id, |
8599 | const struct bpf_prog *prog) |
8600 | { |
8601 | struct btf_kfunc_hook_filter *hook_filter; |
8602 | struct btf_id_set8 *set; |
8603 | u32 *id, i; |
8604 | |
8605 | if (hook >= BTF_KFUNC_HOOK_MAX) |
8606 | return NULL; |
8607 | if (!btf->kfunc_set_tab) |
8608 | return NULL; |
8609 | hook_filter = &btf->kfunc_set_tab->hook_filters[hook]; |
8610 | for (i = 0; i < hook_filter->nr_filters; i++) { |
8611 | if (hook_filter->filters[i](prog, kfunc_btf_id)) |
8612 | return NULL; |
8613 | } |
8614 | set = btf->kfunc_set_tab->sets[hook]; |
8615 | if (!set) |
8616 | return NULL; |
8617 | 	id = btf_id_set8_contains(set, kfunc_btf_id);
8618 | if (!id) |
8619 | return NULL; |
8620 | /* The flags for BTF ID are located next to it */ |
8621 | return id + 1; |
8622 | } |
8623 | |
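
/* For reference: the "id + 1" trick above relies on the set layout from
 * include/linux/btf_ids.h, where each entry is a u32 BTF ID immediately
 * followed by its u32 flags:
 *
 *	struct btf_id_set8 {
 *		u32 cnt;
 *		u32 flags;
 *		struct {
 *			u32 id;
 *			u32 flags;
 *		} pairs[];
 *	};
 */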
8624 | static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type) |
8625 | { |
8626 | switch (prog_type) { |
8627 | case BPF_PROG_TYPE_UNSPEC: |
8628 | return BTF_KFUNC_HOOK_COMMON; |
8629 | case BPF_PROG_TYPE_XDP: |
8630 | return BTF_KFUNC_HOOK_XDP; |
8631 | case BPF_PROG_TYPE_SCHED_CLS: |
8632 | return BTF_KFUNC_HOOK_TC; |
8633 | case BPF_PROG_TYPE_STRUCT_OPS: |
8634 | return BTF_KFUNC_HOOK_STRUCT_OPS; |
8635 | case BPF_PROG_TYPE_TRACING: |
8636 | case BPF_PROG_TYPE_TRACEPOINT: |
8637 | case BPF_PROG_TYPE_PERF_EVENT: |
8638 | case BPF_PROG_TYPE_LSM: |
8639 | return BTF_KFUNC_HOOK_TRACING; |
8640 | case BPF_PROG_TYPE_SYSCALL: |
8641 | return BTF_KFUNC_HOOK_SYSCALL; |
8642 | case BPF_PROG_TYPE_CGROUP_SKB: |
8643 | case BPF_PROG_TYPE_CGROUP_SOCK: |
8644 | case BPF_PROG_TYPE_CGROUP_DEVICE: |
8645 | case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: |
8646 | case BPF_PROG_TYPE_CGROUP_SOCKOPT: |
8647 | case BPF_PROG_TYPE_CGROUP_SYSCTL: |
8648 | case BPF_PROG_TYPE_SOCK_OPS: |
8649 | return BTF_KFUNC_HOOK_CGROUP; |
8650 | case BPF_PROG_TYPE_SCHED_ACT: |
8651 | return BTF_KFUNC_HOOK_SCHED_ACT; |
8652 | case BPF_PROG_TYPE_SK_SKB: |
8653 | return BTF_KFUNC_HOOK_SK_SKB; |
8654 | case BPF_PROG_TYPE_SOCKET_FILTER: |
8655 | return BTF_KFUNC_HOOK_SOCKET_FILTER; |
8656 | case BPF_PROG_TYPE_LWT_OUT: |
8657 | case BPF_PROG_TYPE_LWT_IN: |
8658 | case BPF_PROG_TYPE_LWT_XMIT: |
8659 | case BPF_PROG_TYPE_LWT_SEG6LOCAL: |
8660 | return BTF_KFUNC_HOOK_LWT; |
8661 | case BPF_PROG_TYPE_NETFILTER: |
8662 | return BTF_KFUNC_HOOK_NETFILTER; |
8663 | case BPF_PROG_TYPE_KPROBE: |
8664 | return BTF_KFUNC_HOOK_KPROBE; |
8665 | default: |
8666 | return BTF_KFUNC_HOOK_MAX; |
8667 | } |
8668 | } |
8669 | |
8670 | /* Caution: |
8671 | * Reference to the module (obtained using btf_try_get_module) corresponding to |
8672 | * the struct btf *MUST* be held when calling this function from verifier |
8673 | * context. This is usually true as we stash references in prog's kfunc_btf_tab; |
8674 | * keeping the reference for the duration of the call provides the necessary |
8675 | * protection for looking up a well-formed btf->kfunc_set_tab. |
8676 | */ |
8677 | u32 *btf_kfunc_id_set_contains(const struct btf *btf, |
8678 | u32 kfunc_btf_id, |
8679 | const struct bpf_prog *prog) |
8680 | { |
8681 | enum bpf_prog_type prog_type = resolve_prog_type(prog); |
8682 | enum btf_kfunc_hook hook; |
8683 | u32 *kfunc_flags; |
8684 | |
8685 | 	kfunc_flags = __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id, prog);
8686 | if (kfunc_flags) |
8687 | return kfunc_flags; |
8688 | |
8689 | hook = bpf_prog_type_to_kfunc_hook(prog_type); |
8690 | return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id, prog); |
8691 | } |
8692 | |
8693 | u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id, |
8694 | const struct bpf_prog *prog) |
8695 | { |
8696 | 	return __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id, prog);
8697 | } |
8698 | |
8699 | static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook, |
8700 | const struct btf_kfunc_id_set *kset) |
8701 | { |
8702 | struct btf *btf; |
8703 | int ret, i; |
8704 | |
8705 | 	btf = btf_get_module_btf(kset->owner);
8706 | 	if (!btf)
8707 | 		return check_btf_kconfigs(kset->owner, "kfunc");
8708 | 	if (IS_ERR(btf))
8709 | 		return PTR_ERR(btf);
8710 | |
8711 | for (i = 0; i < kset->set->cnt; i++) { |
8712 | 		ret = btf_check_kfunc_protos(btf, btf_relocate_id(btf, kset->set->pairs[i].id),
8713 | 					     kset->set->pairs[i].flags);
8714 | if (ret) |
8715 | goto err_out; |
8716 | } |
8717 | |
8718 | ret = btf_populate_kfunc_set(btf, hook, kset); |
8719 | |
8720 | err_out: |
8721 | btf_put(btf); |
8722 | return ret; |
8723 | } |
8724 | |
8725 | /* This function must be invoked only from initcalls/module init functions */ |
8726 | int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, |
8727 | const struct btf_kfunc_id_set *kset) |
8728 | { |
8729 | enum btf_kfunc_hook hook; |
8730 | |
8731 | /* All kfuncs need to be tagged as such in BTF. |
8732 | * WARN() for initcall registrations that do not check errors. |
8733 | */ |
8734 | if (!(kset->set->flags & BTF_SET8_KFUNCS)) { |
8735 | WARN_ON(!kset->owner); |
8736 | return -EINVAL; |
8737 | } |
8738 | |
8739 | hook = bpf_prog_type_to_kfunc_hook(prog_type); |
8740 | return __register_btf_kfunc_id_set(hook, kset); |
8741 | } |
8742 | EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set); |
8743 | |
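/* Example (sketch): the usual registration pattern from an initcall or
 * module init, following Documentation/bpf/kfuncs.rst. The kfunc name
 * bpf_example_kfunc and the chosen program type are placeholders; the
 * BTF_KFUNCS_START()/BTF_KFUNCS_END() macros set BTF_SET8_KFUNCS, which
 * the check above requires.
 */
BTF_KFUNCS_START(example_kfunc_ids)
BTF_ID_FLAGS(func, bpf_example_kfunc, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(example_kfunc_ids)

static const struct btf_kfunc_id_set example_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &example_kfunc_ids,
};

static int __init example_kfunc_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
					 &example_kfunc_set);
}
late_initcall(example_kfunc_init);
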
8744 | /* This function must be invoked only from initcalls/module init functions */ |
8745 | int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset) |
8746 | { |
8747 | 	return __register_btf_kfunc_id_set(BTF_KFUNC_HOOK_FMODRET, kset);
8748 | } |
8749 | EXPORT_SYMBOL_GPL(register_btf_fmodret_id_set); |
8750 | |
8751 | s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id) |
8752 | { |
8753 | struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab; |
8754 | struct btf_id_dtor_kfunc *dtor; |
8755 | |
8756 | if (!tab) |
8757 | return -ENOENT; |
8758 | /* Even though the size of tab->dtors[0] is > sizeof(u32), we only need |
8759 | * to compare the first u32 with btf_id, so we can reuse btf_id_cmp_func. |
8760 | */ |
8761 | BUILD_BUG_ON(offsetof(struct btf_id_dtor_kfunc, btf_id) != 0); |
8762 | 	dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func);
8763 | if (!dtor) |
8764 | return -ENOENT; |
8765 | return dtor->kfunc_btf_id; |
8766 | } |
8767 | |
8768 | static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt) |
8769 | { |
8770 | const struct btf_type *dtor_func, *dtor_func_proto, *t; |
8771 | const struct btf_param *args; |
8772 | s32 dtor_btf_id; |
8773 | u32 nr_args, i; |
8774 | |
8775 | for (i = 0; i < cnt; i++) { |
8776 | 		dtor_btf_id = btf_relocate_id(btf, dtors[i].kfunc_btf_id);
8777 | |
8778 | dtor_func = btf_type_by_id(btf, dtor_btf_id); |
8779 | 		if (!dtor_func || !btf_type_is_func(dtor_func))
8780 | return -EINVAL; |
8781 | |
8782 | dtor_func_proto = btf_type_by_id(btf, dtor_func->type); |
8783 | 		if (!dtor_func_proto || !btf_type_is_func_proto(dtor_func_proto))
8784 | return -EINVAL; |
8785 | |
8786 | /* Make sure the prototype of the destructor kfunc is 'void func(type *)' */ |
8787 | t = btf_type_by_id(btf, dtor_func_proto->type); |
8788 | if (!t || !btf_type_is_void(t)) |
8789 | return -EINVAL; |
8790 | |
8791 | 		nr_args = btf_type_vlen(dtor_func_proto);
8792 | if (nr_args != 1) |
8793 | return -EINVAL; |
8794 | 		args = btf_params(dtor_func_proto);
8795 | t = btf_type_by_id(btf, args[0].type); |
8796 | 		/* Allow any pointer type, as the width on targets Linux supports
8797 | 		 * will be the same for all pointer types (i.e. sizeof(void *))
8798 | 		 */
8799 | if (!t || !btf_type_is_ptr(t)) |
8800 | return -EINVAL; |
8801 | } |
8802 | return 0; |
8803 | } |
8804 | |
8805 | /* This function must be invoked only from initcalls/module init functions */ |
8806 | int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt, |
8807 | struct module *owner) |
8808 | { |
8809 | struct btf_id_dtor_kfunc_tab *tab; |
8810 | struct btf *btf; |
8811 | u32 tab_cnt, i; |
8812 | int ret; |
8813 | |
8814 | 	btf = btf_get_module_btf(owner);
8815 | 	if (!btf)
8816 | 		return check_btf_kconfigs(owner, "dtor kfuncs");
8817 | 	if (IS_ERR(btf))
8818 | 		return PTR_ERR(btf);
8819 | |
8820 | if (add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) { |
8821 | pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT); |
8822 | ret = -E2BIG; |
8823 | goto end; |
8824 | } |
8825 | |
8826 | /* Ensure that the prototype of dtor kfuncs being registered is sane */ |
8827 | 	ret = btf_check_dtor_kfuncs(btf, dtors, add_cnt);
8828 | if (ret < 0) |
8829 | goto end; |
8830 | |
8831 | tab = btf->dtor_kfunc_tab; |
8832 | /* Only one call allowed for modules */ |
8833 | if (WARN_ON_ONCE(tab && btf_is_module(btf))) { |
8834 | ret = -EINVAL; |
8835 | goto end; |
8836 | } |
8837 | |
8838 | tab_cnt = tab ? tab->cnt : 0; |
8839 | if (tab_cnt > U32_MAX - add_cnt) { |
8840 | ret = -EOVERFLOW; |
8841 | goto end; |
8842 | } |
8843 | if (tab_cnt + add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) { |
8844 | pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT); |
8845 | ret = -E2BIG; |
8846 | goto end; |
8847 | } |
8848 | |
8849 | tab = krealloc(btf->dtor_kfunc_tab, |
8850 | struct_size(tab, dtors, tab_cnt + add_cnt), |
8851 | GFP_KERNEL | __GFP_NOWARN); |
8852 | if (!tab) { |
8853 | ret = -ENOMEM; |
8854 | goto end; |
8855 | } |
8856 | |
8857 | if (!btf->dtor_kfunc_tab) |
8858 | tab->cnt = 0; |
8859 | btf->dtor_kfunc_tab = tab; |
8860 | |
8861 | memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0])); |
8862 | |
8863 | /* remap BTF ids based on BTF relocation (if any) */ |
8864 | for (i = tab_cnt; i < tab_cnt + add_cnt; i++) { |
8865 | 		tab->dtors[i].btf_id = btf_relocate_id(btf, tab->dtors[i].btf_id);
8866 | 		tab->dtors[i].kfunc_btf_id = btf_relocate_id(btf, tab->dtors[i].kfunc_btf_id);
8867 | } |
8868 | |
8869 | tab->cnt += add_cnt; |
8870 | |
8871 | 	sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);
8872 | |
8873 | end: |
8874 | if (ret) |
8875 | btf_free_dtor_kfunc_tab(btf); |
8876 | btf_put(btf); |
8877 | return ret; |
8878 | } |
8879 | EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs); |
8880 | |
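/* Example (sketch): registering a destructor kfunc for a kptr-capable
 * type, following the pattern in Documentation/bpf/kfuncs.rst. Both
 * example_obj and example_obj_release are hypothetical names standing in
 * for a real type and its release function.
 */
BTF_ID_LIST(example_dtor_ids)
BTF_ID(struct, example_obj)
BTF_ID(func, example_obj_release)

static const struct btf_id_dtor_kfunc example_dtors[] = {
	{
		.btf_id	      = example_dtor_ids[0],
		.kfunc_btf_id = example_dtor_ids[1],
	},
};

static int __init example_dtor_init(void)
{
	return register_btf_id_dtor_kfuncs(example_dtors,
					   ARRAY_SIZE(example_dtors),
					   THIS_MODULE);
}
late_initcall(example_dtor_init);
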
8881 | #define MAX_TYPES_ARE_COMPAT_DEPTH 2 |
8882 | |
8883 | /* Check local and target types for compatibility. This check is used for |
8884 |  * type-based CO-RE relocations and follows slightly different rules than
8885 | * field-based relocations. This function assumes that root types were already |
8886 | * checked for name match. Beyond that initial root-level name check, names |
8887 | * are completely ignored. Compatibility rules are as follows: |
8888 | * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but |
8889 | * kind should match for local and target types (i.e., STRUCT is not |
8890 | * compatible with UNION); |
8891 | * - for ENUMs/ENUM64s, the size is ignored; |
8892 | * - for INT, size and signedness are ignored; |
8893 | * - for ARRAY, dimensionality is ignored, element types are checked for |
8894 | * compatibility recursively; |
8895 | * - CONST/VOLATILE/RESTRICT modifiers are ignored; |
8896 |  * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
8897 | * - FUNC_PROTOs are compatible if they have compatible signature: same |
8898 | * number of input args and compatible return and argument types. |
8899 | * These rules are not set in stone and probably will be adjusted as we get |
8900 | * more experience with using BPF CO-RE relocations. |
8901 | */ |
8902 | int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, |
8903 | const struct btf *targ_btf, __u32 targ_id) |
8904 | { |
8905 | return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, |
8906 | MAX_TYPES_ARE_COMPAT_DEPTH); |
8907 | } |
8908 | |
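/* Example (sketch): under the rules above, these two definitions taken
 * from different BTFs are compatible for type-based relocations: both
 * are STRUCTs and the root-level name check already passed, while member
 * names, types and layout are not examined for struct kinds.
 *
 *	// local (program) BTF
 *	struct foo { int a; };
 *
 *	// target (kernel) BTF
 *	struct foo { long x; struct bar *p; };
 */
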
8909 | #define MAX_TYPES_MATCH_DEPTH 2 |
8910 | |
8911 | int bpf_core_types_match(const struct btf *local_btf, u32 local_id, |
8912 | const struct btf *targ_btf, u32 targ_id) |
8913 | { |
8914 | 	return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false,
8915 | MAX_TYPES_MATCH_DEPTH); |
8916 | } |
8917 | |
8918 | static bool bpf_core_is_flavor_sep(const char *s) |
8919 | { |
8920 | /* check X___Y name pattern, where X and Y are not underscores */ |
8921 | return s[0] != '_' && /* X */ |
8922 | s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */ |
8923 | s[4] != '_'; /* Y */ |
8924 | } |
8925 | |
8926 | size_t bpf_core_essential_name_len(const char *name) |
8927 | { |
8928 | size_t n = strlen(name); |
8929 | int i; |
8930 | |
8931 | for (i = n - 5; i >= 0; i--) { |
8932 | 		if (bpf_core_is_flavor_sep(name + i))
8933 | return i + 1; |
8934 | } |
8935 | return n; |
8936 | } |
8937 | |
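/* Example (sketch): "flavors" let a program carry alternate local
 * definitions of one kernel type; the ___suffix (here ___v518, an
 * arbitrary illustrative tag) is ignored when matching, since only the
 * essential name is compared:
 *
 *	bpf_core_essential_name_len("task_struct")        -> 11
 *	bpf_core_essential_name_len("task_struct___v518") -> 11
 *
 * so a local "task_struct___v518" matches a target "task_struct".
 */
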
8938 | static void bpf_free_cands(struct bpf_cand_cache *cands) |
8939 | { |
8940 | if (!cands->cnt) |
8941 | /* empty candidate array was allocated on stack */ |
8942 | return; |
8943 | 	kfree(cands);
8944 | } |
8945 | |
8946 | static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands) |
8947 | { |
8948 | 	kfree(cands->name);
8949 | 	kfree(cands);
8950 | } |
8951 | |
8952 | #define VMLINUX_CAND_CACHE_SIZE 31 |
8953 | static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE]; |
8954 | |
8955 | #define MODULE_CAND_CACHE_SIZE 31 |
8956 | static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE]; |
8957 | |
8958 | static void __print_cand_cache(struct bpf_verifier_log *log, |
8959 | struct bpf_cand_cache **cache, |
8960 | int cache_size) |
8961 | { |
8962 | struct bpf_cand_cache *cc; |
8963 | int i, j; |
8964 | |
8965 | for (i = 0; i < cache_size; i++) { |
8966 | cc = cache[i]; |
8967 | if (!cc) |
8968 | continue; |
8969 | 		bpf_log(log, "[%d]%s(", i, cc->name);
8970 | 		for (j = 0; j < cc->cnt; j++) {
8971 | 			bpf_log(log, "%d", cc->cands[j].id);
8972 | 			if (j < cc->cnt - 1)
8973 | 				bpf_log(log, " ");
8974 | 		}
8975 | 		bpf_log(log, "), ");
8976 | } |
8977 | } |
8978 | |
8979 | static void print_cand_cache(struct bpf_verifier_log *log) |
8980 | { |
8981 | mutex_lock(&cand_cache_mutex); |
8982 | 	bpf_log(log, "vmlinux_cand_cache:");
8983 | 	__print_cand_cache(log, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
8984 | 	bpf_log(log, "\nmodule_cand_cache:");
8985 | 	__print_cand_cache(log, module_cand_cache, MODULE_CAND_CACHE_SIZE);
8986 | 	bpf_log(log, "\n");
8987 | 	mutex_unlock(&cand_cache_mutex);
8988 | } |
8989 | |
8990 | static u32 hash_cands(struct bpf_cand_cache *cands) |
8991 | { |
8992 | 	return jhash(cands->name, cands->name_len, 0);
8993 | } |
8994 | |
8995 | static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands, |
8996 | struct bpf_cand_cache **cache, |
8997 | int cache_size) |
8998 | { |
8999 | struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size]; |
9000 | |
9001 | if (cc && cc->name_len == cands->name_len && |
9002 | !strncmp(cc->name, cands->name, cands->name_len)) |
9003 | return cc; |
9004 | return NULL; |
9005 | } |
9006 | |
9007 | static size_t sizeof_cands(int cnt) |
9008 | { |
9009 | return offsetof(struct bpf_cand_cache, cands[cnt]); |
9010 | } |
9011 | |
9012 | static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands, |
9013 | struct bpf_cand_cache **cache, |
9014 | int cache_size) |
9015 | { |
9016 | struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands; |
9017 | |
9018 | if (*cc) { |
9019 | 		bpf_free_cands_from_cache(*cc);
9020 | *cc = NULL; |
9021 | } |
9022 | new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL); |
9023 | if (!new_cands) { |
9024 | bpf_free_cands(cands); |
9025 | 		return ERR_PTR(-ENOMEM);
9026 | } |
9027 | 	/* strdup the name, since it will stay in the cache.
9028 | 	 * cands->name points to strings in the prog's BTF and the prog can be unloaded.
9029 | 	 */
9030 | 	new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL);
9031 | bpf_free_cands(cands); |
9032 | if (!new_cands->name) { |
9033 | 		kfree(new_cands);
9034 | 		return ERR_PTR(-ENOMEM);
9035 | } |
9036 | *cc = new_cands; |
9037 | return new_cands; |
9038 | } |
9039 | |
9040 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
9041 | static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache, |
9042 | int cache_size) |
9043 | { |
9044 | struct bpf_cand_cache *cc; |
9045 | int i, j; |
9046 | |
9047 | for (i = 0; i < cache_size; i++) { |
9048 | cc = cache[i]; |
9049 | if (!cc) |
9050 | continue; |
9051 | if (!btf) { |
9052 | 			/* when a new module is loaded, purge all of module_cand_cache,
9053 | 			 * since the new module might have candidates with names
9054 | 			 * that match cached cands.
9055 | */ |
9056 | bpf_free_cands_from_cache(cc); |
9057 | cache[i] = NULL; |
9058 | continue; |
9059 | } |
9060 | 		/* when a module is unloaded, purge cache entries
9061 | 		 * that match the module's btf
9062 | */ |
9063 | for (j = 0; j < cc->cnt; j++) |
9064 | if (cc->cands[j].btf == btf) { |
9065 | bpf_free_cands_from_cache(cc); |
9066 | cache[i] = NULL; |
9067 | break; |
9068 | } |
9069 | } |
9070 | |
9071 | } |
9072 | |
9073 | static void purge_cand_cache(struct btf *btf) |
9074 | { |
9075 | mutex_lock(&cand_cache_mutex); |
9076 | __purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE); |
9077 | mutex_unlock(&cand_cache_mutex); |
9078 | } |
9079 | #endif |
9080 | |
9081 | static struct bpf_cand_cache * |
9082 | bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf, |
9083 | int targ_start_id) |
9084 | { |
9085 | struct bpf_cand_cache *new_cands; |
9086 | const struct btf_type *t; |
9087 | const char *targ_name; |
9088 | size_t targ_essent_len; |
9089 | int n, i; |
9090 | |
9091 | 	n = btf_nr_types(targ_btf);
9092 | for (i = targ_start_id; i < n; i++) { |
9093 | t = btf_type_by_id(targ_btf, i); |
9094 | if (btf_kind(t) != cands->kind) |
9095 | continue; |
9096 | |
9097 | 		targ_name = btf_name_by_offset(targ_btf, t->name_off);
9098 | if (!targ_name) |
9099 | continue; |
9100 | |
9101 | 		/* the resched point is before strncmp to make sure that a search
9102 | 		 * for a non-existing name will have a chance to schedule().
9103 | */ |
9104 | cond_resched(); |
9105 | |
9106 | if (strncmp(cands->name, targ_name, cands->name_len) != 0) |
9107 | continue; |
9108 | |
9109 | 		targ_essent_len = bpf_core_essential_name_len(targ_name);
9110 | if (targ_essent_len != cands->name_len) |
9111 | continue; |
9112 | |
9113 | /* most of the time there is only one candidate for a given kind+name pair */ |
9114 | new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL); |
9115 | if (!new_cands) { |
9116 | bpf_free_cands(cands); |
9117 | 			return ERR_PTR(-ENOMEM);
9118 | } |
9119 | |
9120 | memcpy(new_cands, cands, sizeof_cands(cands->cnt)); |
9121 | bpf_free_cands(cands); |
9122 | cands = new_cands; |
9123 | cands->cands[cands->cnt].btf = targ_btf; |
9124 | cands->cands[cands->cnt].id = i; |
9125 | cands->cnt++; |
9126 | } |
9127 | return cands; |
9128 | } |
9129 | |
9130 | static struct bpf_cand_cache * |
9131 | bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id) |
9132 | { |
9133 | struct bpf_cand_cache *cands, *cc, local_cand = {}; |
9134 | const struct btf *local_btf = ctx->btf; |
9135 | const struct btf_type *local_type; |
9136 | const struct btf *main_btf; |
9137 | size_t local_essent_len; |
9138 | struct btf *mod_btf; |
9139 | const char *name; |
9140 | int id; |
9141 | |
9142 | main_btf = bpf_get_btf_vmlinux(); |
9143 | 	if (IS_ERR(main_btf))
9144 | 		return ERR_CAST(main_btf);
9145 | 	if (!main_btf)
9146 | 		return ERR_PTR(-EINVAL);
9147 | |
9148 | local_type = btf_type_by_id(local_btf, local_type_id); |
9149 | if (!local_type) |
9150 | 		return ERR_PTR(-EINVAL);
9151 | |
9152 | 	name = btf_name_by_offset(local_btf, local_type->name_off);
9153 | 	if (str_is_empty(name))
9154 | 		return ERR_PTR(-EINVAL);
9155 | local_essent_len = bpf_core_essential_name_len(name); |
9156 | |
9157 | cands = &local_cand; |
9158 | cands->name = name; |
9159 | 	cands->kind = btf_kind(local_type);
9160 | cands->name_len = local_essent_len; |
9161 | |
9162 | 	cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
9163 | /* cands is a pointer to stack here */ |
9164 | if (cc) { |
9165 | if (cc->cnt) |
9166 | return cc; |
9167 | goto check_modules; |
9168 | } |
9169 | |
9170 | /* Attempt to find target candidates in vmlinux BTF first */ |
9171 | 	cands = bpf_core_add_cands(cands, main_btf, 1);
9172 | 	if (IS_ERR(cands))
9173 | 		return ERR_CAST(cands);
9174 | |
9175 | /* cands is a pointer to kmalloced memory here if cands->cnt > 0 */ |
9176 | |
9177 | /* populate cache even when cands->cnt == 0 */ |
9178 | 	cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
9179 | 	if (IS_ERR(cc))
9180 | 		return ERR_CAST(cc);
9181 | |
9182 | /* if vmlinux BTF has any candidate, don't go for module BTFs */ |
9183 | if (cc->cnt) |
9184 | return cc; |
9185 | |
9186 | check_modules: |
9187 | /* cands is a pointer to stack here and cands->cnt == 0 */ |
9188 | 	cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
9189 | if (cc) |
9190 | /* if cache has it return it even if cc->cnt == 0 */ |
9191 | return cc; |
9192 | |
9193 | /* If candidate is not found in vmlinux's BTF then search in module's BTFs */ |
9194 | 	spin_lock_bh(&btf_idr_lock);
9195 | 	idr_for_each_entry(&btf_idr, mod_btf, id) {
9196 | 		if (!btf_is_module(mod_btf))
9197 | 			continue;
9198 | 		/* linear search could be slow hence unlock/lock
9199 | 		 * the IDR to avoid holding it for too long
9200 | 		 */
9201 | 		btf_get(mod_btf);
9202 | 		spin_unlock_bh(&btf_idr_lock);
9203 | 		cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
9204 | 		btf_put(mod_btf);
9205 | 		if (IS_ERR(cands))
9206 | 			return ERR_CAST(cands);
9207 | 		spin_lock_bh(&btf_idr_lock);
9208 | 	}
9209 | 	spin_unlock_bh(&btf_idr_lock);
9210 | /* cands is a pointer to kmalloced memory here if cands->cnt > 0 |
9211 |  * or pointer to stack if cands->cnt == 0.
9212 | * Copy it into the cache even when cands->cnt == 0 and |
9213 | * return the result. |
9214 | */ |
9215 | 	return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
9216 | } |
9217 | |
9218 | int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, |
9219 | int relo_idx, void *insn) |
9220 | { |
9221 | bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL; |
9222 | struct bpf_core_cand_list cands = {}; |
9223 | struct bpf_core_relo_res targ_res; |
9224 | struct bpf_core_spec *specs; |
9225 | const struct btf_type *type; |
9226 | int err; |
9227 | |
9228 | /* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5" |
9229 | * into arrays of btf_ids of struct fields and array indices. |
9230 | */ |
9231 | specs = kcalloc(3, sizeof(*specs), GFP_KERNEL); |
9232 | if (!specs) |
9233 | return -ENOMEM; |
9234 | |
9235 | type = btf_type_by_id(ctx->btf, relo->type_id); |
9236 | if (!type) { |
9237 | 		bpf_log(ctx->log, "relo #%u: bad type id %u\n",
9238 | 			relo_idx, relo->type_id);
9239 | 		kfree(specs);
9240 | return -EINVAL; |
9241 | } |
9242 | |
9243 | if (need_cands) { |
9244 | struct bpf_cand_cache *cc; |
9245 | int i; |
9246 | |
9247 | mutex_lock(&cand_cache_mutex); |
9248 | 		cc = bpf_core_find_cands(ctx, relo->type_id);
9249 | 		if (IS_ERR(cc)) {
9250 | 			bpf_log(ctx->log, "target candidate search failed for %d\n",
9251 | 				relo->type_id);
9252 | 			err = PTR_ERR(cc);
9253 | goto out; |
9254 | } |
9255 | if (cc->cnt) { |
9256 | cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL); |
9257 | if (!cands.cands) { |
9258 | err = -ENOMEM; |
9259 | goto out; |
9260 | } |
9261 | } |
9262 | for (i = 0; i < cc->cnt; i++) { |
9263 | 			bpf_log(ctx->log,
9264 | 				"CO-RE relocating %s %s: found target candidate [%d]\n",
9265 | btf_kind_str[cc->kind], cc->name, cc->cands[i].id); |
9266 | cands.cands[i].btf = cc->cands[i].btf; |
9267 | cands.cands[i].id = cc->cands[i].id; |
9268 | } |
9269 | cands.len = cc->cnt; |
9270 | /* cand_cache_mutex needs to span the cache lookup and |
9271 | * copy of btf pointer into bpf_core_cand_list, |
9272 | * since module can be unloaded while bpf_core_calc_relo_insn |
9273 | * is working with module's btf. |
9274 | */ |
9275 | } |
9276 | |
9277 | 	err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs,
9278 | 				      &targ_res);
9279 | if (err) |
9280 | goto out; |
9281 | |
9282 | 	err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx,
9283 | 				  &targ_res);
9284 | |
9285 | out: |
9286 | 	kfree(specs);
9287 | if (need_cands) { |
9288 | 		kfree(cands.cands);
9289 | 		mutex_unlock(&cand_cache_mutex);
9290 | 		if (ctx->log->level & BPF_LOG_LEVEL2)
9291 | 			print_cand_cache(ctx->log);
9292 | } |
9293 | return err; |
9294 | } |
9295 | |
9296 | bool btf_nested_type_is_trusted(struct bpf_verifier_log *log, |
9297 | const struct bpf_reg_state *reg, |
9298 | const char *field_name, u32 btf_id, const char *suffix) |
9299 | { |
9300 | struct btf *btf = reg->btf; |
9301 | const struct btf_type *walk_type, *safe_type; |
9302 | const char *tname; |
9303 | char safe_tname[64]; |
9304 | long ret, safe_id; |
9305 | const struct btf_member *member; |
9306 | u32 i; |
9307 | |
9308 | walk_type = btf_type_by_id(btf, reg->btf_id); |
9309 | if (!walk_type) |
9310 | return false; |
9311 | |
9312 | 	tname = btf_name_by_offset(btf, walk_type->name_off);
9313 | |
9314 | 	ret = snprintf(safe_tname, sizeof(safe_tname), "%s%s", tname, suffix);
9315 | if (ret >= sizeof(safe_tname)) |
9316 | return false; |
9317 | |
9318 | 	safe_id = btf_find_by_name_kind(btf, safe_tname, BTF_INFO_KIND(walk_type->info));
9319 | if (safe_id < 0) |
9320 | return false; |
9321 | |
9322 | safe_type = btf_type_by_id(btf, safe_id); |
9323 | if (!safe_type) |
9324 | return false; |
9325 | |
9326 | for_each_member(i, safe_type, member) { |
9327 | 		const char *m_name = __btf_name_by_offset(btf, member->name_off);
9328 | const struct btf_type *mtype = btf_type_by_id(btf, member->type); |
9329 | u32 id; |
9330 | |
9331 | 		if (!btf_type_is_ptr(mtype))
9332 | continue; |
9333 | |
9334 | 		btf_type_skip_modifiers(btf, mtype->type, &id);
9335 | /* If we match on both type and name, the field is considered trusted. */ |
9336 | if (btf_id == id && !strcmp(field_name, m_name)) |
9337 | return true; |
9338 | } |
9339 | |
9340 | return false; |
9341 | } |
9342 | |
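/* Example (sketch): callers describe the walkable fields of a type with
 * a companion struct whose name is the original plus a suffix; adapted
 * from the BTF_TYPE_SAFE_TRUSTED() definitions in kernel/bpf/verifier.c:
 *
 *	BTF_TYPE_SAFE_TRUSTED(struct socket) {
 *		struct sock *sk;
 *	};
 *
 * which expands to "struct socket__safe_trusted". A walk of socket.sk
 * then passes the type+name match performed above.
 */
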
9343 | bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log, |
9344 | const struct btf *reg_btf, u32 reg_id, |
9345 | const struct btf *arg_btf, u32 arg_id) |
9346 | { |
9347 | const char *reg_name, *arg_name, *search_needle; |
9348 | const struct btf_type *reg_type, *arg_type; |
9349 | int reg_len, arg_len, cmp_len; |
9350 | size_t pattern_len = sizeof(NOCAST_ALIAS_SUFFIX) - sizeof(char); |
9351 | |
9352 | reg_type = btf_type_by_id(reg_btf, reg_id); |
9353 | if (!reg_type) |
9354 | return false; |
9355 | |
9356 | arg_type = btf_type_by_id(arg_btf, arg_id); |
9357 | if (!arg_type) |
9358 | return false; |
9359 | |
9360 | 	reg_name = btf_name_by_offset(reg_btf, reg_type->name_off);
9361 | 	arg_name = btf_name_by_offset(arg_btf, arg_type->name_off);
9362 | |
9363 | reg_len = strlen(reg_name); |
9364 | arg_len = strlen(arg_name); |
9365 | |
9366 | /* Exactly one of the two type names may be suffixed with ___init, so |
9367 | * if the strings are the same size, they can't possibly be no-cast |
9368 | * aliases of one another. If you have two of the same type names, e.g. |
9369 | * they're both nf_conn___init, it would be improper to return true |
9370 | * because they are _not_ no-cast aliases, they are the same type. |
9371 | */ |
9372 | if (reg_len == arg_len) |
9373 | return false; |
9374 | |
9375 | /* Either of the two names must be the other name, suffixed with ___init. */ |
9376 | if ((reg_len != arg_len + pattern_len) && |
9377 | (arg_len != reg_len + pattern_len)) |
9378 | return false; |
9379 | |
9380 | if (reg_len < arg_len) { |
9381 | search_needle = strstr(arg_name, NOCAST_ALIAS_SUFFIX); |
9382 | cmp_len = reg_len; |
9383 | } else { |
9384 | search_needle = strstr(reg_name, NOCAST_ALIAS_SUFFIX); |
9385 | cmp_len = arg_len; |
9386 | } |
9387 | |
9388 | if (!search_needle) |
9389 | return false; |
9390 | |
9391 | /* ___init suffix must come at the end of the name */ |
9392 | if (*(search_needle + pattern_len) != '\0') |
9393 | return false; |
9394 | |
9395 | return !strncmp(reg_name, arg_name, cmp_len); |
9396 | } |
9397 | |
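/* Example (sketch): the canonical no-cast alias pair is netfilter's
 * "struct nf_conn___init", a wrapper type returned by acquire kfuncs for
 * a not-yet-confirmed nf_conn. By the rules above, nf_conn___init and
 * nf_conn alias each other (one name is the other plus the ___init
 * suffix), while two occurrences of nf_conn___init are the same type,
 * not aliases.
 */
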
9398 | #ifdef CONFIG_BPF_JIT |
9399 | static int |
9400 | btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops, |
9401 | struct bpf_verifier_log *log) |
9402 | { |
9403 | struct btf_struct_ops_tab *tab, *new_tab; |
9404 | int i, err; |
9405 | |
9406 | tab = btf->struct_ops_tab; |
9407 | if (!tab) { |
9408 | tab = kzalloc(struct_size(tab, ops, 4), GFP_KERNEL); |
9409 | if (!tab) |
9410 | return -ENOMEM; |
9411 | tab->capacity = 4; |
9412 | btf->struct_ops_tab = tab; |
9413 | } |
9414 | |
9415 | for (i = 0; i < tab->cnt; i++) |
9416 | if (tab->ops[i].st_ops == st_ops) |
9417 | return -EEXIST; |
9418 | |
9419 | if (tab->cnt == tab->capacity) { |
9420 | new_tab = krealloc(tab, |
9421 | struct_size(tab, ops, tab->capacity * 2), |
9422 | GFP_KERNEL); |
9423 | if (!new_tab) |
9424 | return -ENOMEM; |
9425 | tab = new_tab; |
9426 | tab->capacity *= 2; |
9427 | btf->struct_ops_tab = tab; |
9428 | } |
9429 | |
9430 | tab->ops[btf->struct_ops_tab->cnt].st_ops = st_ops; |
9431 | |
9432 | 	err = bpf_struct_ops_desc_init(&tab->ops[btf->struct_ops_tab->cnt], btf, log);
9433 | if (err) |
9434 | return err; |
9435 | |
9436 | btf->struct_ops_tab->cnt++; |
9437 | |
9438 | return 0; |
9439 | } |
9440 | |
9441 | const struct bpf_struct_ops_desc * |
9442 | bpf_struct_ops_find_value(struct btf *btf, u32 value_id) |
9443 | { |
9444 | const struct bpf_struct_ops_desc *st_ops_list; |
9445 | unsigned int i; |
9446 | u32 cnt; |
9447 | |
9448 | if (!value_id) |
9449 | return NULL; |
9450 | if (!btf->struct_ops_tab) |
9451 | return NULL; |
9452 | |
9453 | cnt = btf->struct_ops_tab->cnt; |
9454 | st_ops_list = btf->struct_ops_tab->ops; |
9455 | for (i = 0; i < cnt; i++) { |
9456 | if (st_ops_list[i].value_id == value_id) |
9457 | return &st_ops_list[i]; |
9458 | } |
9459 | |
9460 | return NULL; |
9461 | } |
9462 | |
9463 | const struct bpf_struct_ops_desc * |
9464 | bpf_struct_ops_find(struct btf *btf, u32 type_id) |
9465 | { |
9466 | const struct bpf_struct_ops_desc *st_ops_list; |
9467 | unsigned int i; |
9468 | u32 cnt; |
9469 | |
9470 | if (!type_id) |
9471 | return NULL; |
9472 | if (!btf->struct_ops_tab) |
9473 | return NULL; |
9474 | |
9475 | cnt = btf->struct_ops_tab->cnt; |
9476 | st_ops_list = btf->struct_ops_tab->ops; |
9477 | for (i = 0; i < cnt; i++) { |
9478 | if (st_ops_list[i].type_id == type_id) |
9479 | return &st_ops_list[i]; |
9480 | } |
9481 | |
9482 | return NULL; |
9483 | } |
9484 | |
9485 | int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops) |
9486 | { |
9487 | struct bpf_verifier_log *log; |
9488 | struct btf *btf; |
9489 | int err = 0; |
9490 | |
9491 | 	btf = btf_get_module_btf(st_ops->owner);
9492 | 	if (!btf)
9493 | 		return check_btf_kconfigs(st_ops->owner, "struct_ops");
9494 | 	if (IS_ERR(btf))
9495 | 		return PTR_ERR(btf);
9496 | |
9497 | log = kzalloc(sizeof(*log), GFP_KERNEL | __GFP_NOWARN); |
9498 | if (!log) { |
9499 | err = -ENOMEM; |
9500 | goto errout; |
9501 | } |
9502 | |
9503 | log->level = BPF_LOG_KERNEL; |
9504 | |
9505 | err = btf_add_struct_ops(btf, st_ops, log); |
9506 | |
9507 | errout: |
9508 | 	kfree(log);
9509 | btf_put(btf); |
9510 | |
9511 | return err; |
9512 | } |
9513 | EXPORT_SYMBOL_GPL(__register_bpf_struct_ops); |
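
/* Example (sketch): module-side registration normally goes through the
 * register_bpf_struct_ops() wrapper macro from include/linux/btf.h,
 * which generates the map value type for "type" and then calls
 * __register_bpf_struct_ops(). All example_* names are hypothetical and
 * the callback bodies are omitted.
 */
static struct bpf_struct_ops bpf_example_ops = {
	.verifier_ops = &example_verifier_ops,
	.init	      = example_ops_init,
	.init_member  = example_ops_init_member,
	.reg	      = example_ops_reg,
	.unreg	      = example_ops_unreg,
	.name	      = "example_ops",
	.owner	      = THIS_MODULE,
};

static int __init example_struct_ops_init(void)
{
	return register_bpf_struct_ops(&bpf_example_ops, example_ops);
}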
9514 | #endif |
9515 | |
9516 | bool btf_param_match_suffix(const struct btf *btf, |
9517 | const struct btf_param *arg, |
9518 | const char *suffix) |
9519 | { |
9520 | int suffix_len = strlen(suffix), len; |
9521 | const char *param_name; |
9522 | |
9523 | /* In the future, this can be ported to use BTF tagging */ |
9524 | 	param_name = btf_name_by_offset(btf, arg->name_off);
9525 | 	if (str_is_empty(param_name))
9526 | return false; |
9527 | len = strlen(param_name); |
9528 | if (len <= suffix_len) |
9529 | return false; |
9530 | param_name += len - suffix_len; |
9531 | return !strncmp(param_name, suffix, suffix_len); |
9532 | } |
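
/* Example (sketch): the verifier uses this helper to recognize the
 * argument-name suffixes documented in Documentation/bpf/kfuncs.rst.
 * For a hypothetical kfunc
 *
 *	__bpf_kfunc void bpf_example_copy(void *dst, const void *src,
 *					  u32 src__sz);
 *
 * btf_param_match_suffix(btf, &args[2], "__sz") is true, letting the
 * verifier pair src and src__sz as a (pointer, size) tuple.
 */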
9533 |