1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright (c) 2018 Facebook */ |
3 | |
4 | #include <uapi/linux/btf.h> |
5 | #include <uapi/linux/bpf.h> |
6 | #include <uapi/linux/bpf_perf_event.h> |
7 | #include <uapi/linux/types.h> |
8 | #include <linux/seq_file.h> |
9 | #include <linux/compiler.h> |
10 | #include <linux/ctype.h> |
11 | #include <linux/errno.h> |
12 | #include <linux/slab.h> |
13 | #include <linux/anon_inodes.h> |
14 | #include <linux/file.h> |
15 | #include <linux/uaccess.h> |
16 | #include <linux/kernel.h> |
17 | #include <linux/idr.h> |
18 | #include <linux/sort.h> |
19 | #include <linux/bpf_verifier.h> |
20 | #include <linux/btf.h> |
21 | #include <linux/btf_ids.h> |
22 | #include <linux/bpf.h> |
23 | #include <linux/bpf_lsm.h> |
24 | #include <linux/skmsg.h> |
25 | #include <linux/perf_event.h> |
26 | #include <linux/bsearch.h> |
27 | #include <linux/kobject.h> |
28 | #include <linux/sysfs.h> |
29 | |
30 | #include <net/netfilter/nf_bpf_link.h> |
31 | |
32 | #include <net/sock.h> |
33 | #include <net/xdp.h> |
34 | #include "../tools/lib/bpf/relo_core.h" |
35 | |
/* BTF (BPF Type Format) is the metadata format which describes
 * the data types of BPF programs/maps.  Hence, it basically focuses
 * on the C programming language, which modern BPF primarily
 * uses.
40 | * |
41 | * ELF Section: |
42 | * ~~~~~~~~~~~ |
43 | * The BTF data is stored under the ".BTF" ELF section |
44 | * |
45 | * struct btf_type: |
46 | * ~~~~~~~~~~~~~~~ |
47 | * Each 'struct btf_type' object describes a C data type. |
48 | * Depending on the type it is describing, a 'struct btf_type' |
 * object may be followed by more data.  For example,
 * to describe an array, 'struct btf_type' is followed by
51 | * 'struct btf_array'. |
52 | * |
53 | * 'struct btf_type' and any extra data following it are |
54 | * 4 bytes aligned. |
55 | * |
56 | * Type section: |
57 | * ~~~~~~~~~~~~~ |
58 | * The BTF type section contains a list of 'struct btf_type' objects. |
59 | * Each one describes a C type. Recall from the above section |
60 | * that a 'struct btf_type' object could be immediately followed by extra |
61 | * data in order to describe some particular C types. |
62 | * |
63 | * type_id: |
64 | * ~~~~~~~ |
 * Each btf_type object is identified by a type_id.  The type_id
 * is implied by the location of the btf_type object in
 * the BTF type section: the first one has type_id 1, the second
 * one has type_id 2, etc.  Hence, an earlier btf_type has
 * a smaller type_id.
70 | * |
71 | * A btf_type object may refer to another btf_type object by using |
72 | * type_id (i.e. the "type" in the "struct btf_type"). |
73 | * |
74 | * NOTE that we cannot assume any reference-order. |
75 | * A btf_type object can refer to an earlier btf_type object |
76 | * but it can also refer to a later btf_type object. |
77 | * |
78 | * For example, to describe "const void *". A btf_type |
79 | * object describing "const" may refer to another btf_type |
80 | * object describing "void *". This type-reference is done |
81 | * by specifying type_id: |
82 | * |
83 | * [1] CONST (anon) type_id=2 |
84 | * [2] PTR (anon) type_id=0 |
85 | * |
86 | * The above is the btf_verifier debug log: |
 *   - Each line starting with "[?]" is a btf_type object
 *   - [?] is the type_id of the btf_type object.
 *   - CONST/PTR is the BTF_KIND_XXX
 *   - "(anon)" is the name of the type.  It just
 *     happens that CONST and PTR have no name.
92 | * - type_id=XXX is the 'u32 type' in btf_type |
93 | * |
94 | * NOTE: "void" has type_id 0 |
95 | * |
96 | * String section: |
97 | * ~~~~~~~~~~~~~~ |
98 | * The BTF string section contains the names used by the type section. |
 * Each string is referred to by an "offset" from the beginning of the
100 | * string section. |
101 | * |
102 | * Each string is '\0' terminated. |
103 | * |
104 | * The first character in the string section must be '\0' |
105 | * which is used to mean 'anonymous'. Some btf_type may not |
106 | * have a name. |
107 | */ |
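
/*
 * Illustrative sketch (not part of the build): reusing the "const void *"
 * example above, a raw type section could be populated roughly like this,
 * using only the uapi definitions from <uapi/linux/btf.h>; the kind values
 * and the info bit layout (vlen in bits 0-15, kind in bits 24-28) come
 * from that header:
 *
 *	static const struct btf_type example_types[] = {
 *		{ .name_off = 0,			// [1] CONST (anon)
 *		  .info = BTF_KIND_CONST << 24,
 *		  .type = 2 },				// refers to [2]
 *		{ .name_off = 0,			// [2] PTR (anon)
 *		  .info = BTF_KIND_PTR << 24,
 *		  .type = 0 },				// refers to void
 *	};
 */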
108 | |
109 | /* BTF verification: |
110 | * |
111 | * To verify BTF data, two passes are needed. |
112 | * |
113 | * Pass #1 |
114 | * ~~~~~~~ |
 * The first pass is to collect all btf_type objects into
 * an array: "btf->types".
 *
 * Depending on the C type that a btf_type is describing,
 * a btf_type may be followed by extra data.  We don't know
 * how many btf_types there are, and more importantly we don't
 * know where each btf_type is located in the type section.
 *
 * Without knowing the location of each type_id, most verifications
 * cannot be done.  E.g. an earlier btf_type may refer to a later
 * btf_type (recall the "const void *" above), so we cannot
 * check this type-reference in the first pass.
 *
 * The first pass still does some verification (e.g.
 * checking that the name is a valid offset into the string section).
130 | * |
131 | * Pass #2 |
132 | * ~~~~~~~ |
133 | * The main focus is to resolve a btf_type that is referring |
134 | * to another type. |
135 | * |
 * We have to ensure the type being referred to:
137 | * 1) does exist in the BTF (i.e. in btf->types[]) |
138 | * 2) does not cause a loop: |
139 | * struct A { |
140 | * struct B b; |
141 | * }; |
142 | * |
143 | * struct B { |
144 | * struct A a; |
145 | * }; |
146 | * |
147 | * btf_type_needs_resolve() decides if a btf_type needs |
148 | * to be resolved. |
149 | * |
 * A type that needs_resolve implements the "resolve()" ops, which
 * essentially does a DFS and detects backedges.
152 | * |
153 | * During resolve (or DFS), different C types have different |
154 | * "RESOLVED" conditions. |
155 | * |
156 | * When resolving a BTF_KIND_STRUCT, we need to resolve all its |
157 | * members because a member is always referring to another |
158 | * type. A struct's member can be treated as "RESOLVED" if |
159 | * it is referring to a BTF_KIND_PTR. Otherwise, the |
160 | * following valid C struct would be rejected: |
161 | * |
162 | * struct A { |
163 | * int m; |
164 | * struct A *a; |
165 | * }; |
166 | * |
167 | * When resolving a BTF_KIND_PTR, it needs to keep resolving if |
168 | * it is referring to another BTF_KIND_PTR. Otherwise, we cannot |
169 | * detect a pointer loop, e.g.: |
170 | * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR + |
171 | * ^ | |
172 | * +-----------------------------------------+ |
173 | * |
174 | */ |
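
/*
 * Minimal sketch of the pass #2 idea above (simplified pseudo-helper, not
 * the actual implementation; the real code drives an explicit resolve
 * stack, see struct resolve_vertex below):
 *
 *	static int resolve_sketch(struct btf_verifier_env *env, u32 type_id)
 *	{
 *		if (env->visit_states[type_id] == VISITED)
 *			return -ELOOP;		// backedge => type loop
 *		if (env->visit_states[type_id] == RESOLVED)
 *			return 0;		// already done
 *		env->visit_states[type_id] = VISITED;
 *		// ... recurse into the type(s) this one refers to ...
 *		env->visit_states[type_id] = RESOLVED;
 *		return 0;
 *	}
 */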
175 | |
176 | #define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2) |
177 | #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1) |
178 | #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK) |
179 | #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3) |
180 | #define BITS_ROUNDUP_BYTES(bits) \ |
181 | (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits)) |
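
/* Worked example: for a 13-bit bitfield, BITS_ROUNDDOWN_BYTES(13) == 1,
 * BITS_PER_BYTE_MASKED(13) == 5 and BITS_ROUNDUP_BYTES(13) == 2, i.e. the
 * value needs two bytes of storage.
 */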
182 | |
183 | #define BTF_INFO_MASK 0x9f00ffff |
184 | #define BTF_INT_MASK 0x0fffffff |
185 | #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE) |
186 | #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET) |
187 | |
/* 16MB for 64k structs, each with 16 members, plus
 * a few MB of space for the string section.
 * The hard limit is S32_MAX.
191 | */ |
192 | #define BTF_MAX_SIZE (16 * 1024 * 1024) |
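
/* Rough arithmetic behind the default limit: 64k structs, each a 12-byte
 * struct btf_type followed by 16 12-byte struct btf_member entries, is
 * about 13MB of type data, which leaves a few MB for the string section
 * within the 16MB cap.
 */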
193 | |
194 | #define for_each_member_from(i, from, struct_type, member) \ |
195 | for (i = from, member = btf_type_member(struct_type) + from; \ |
196 | i < btf_type_vlen(struct_type); \ |
197 | i++, member++) |
198 | |
199 | #define for_each_vsi_from(i, from, struct_type, member) \ |
200 | for (i = from, member = btf_type_var_secinfo(struct_type) + from; \ |
201 | i < btf_type_vlen(struct_type); \ |
202 | i++, member++) |
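
/*
 * Usage sketch (hypothetical caller), iterating the members of a struct
 * type starting from index 0:
 *
 *	const struct btf_member *member;
 *	u16 i;
 *
 *	for_each_member_from(i, 0, struct_type, member)
 *		pr_debug("member %u: name_off=%u type=%u offset=%u\n",
 *			 i, member->name_off, member->type, member->offset);
 */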
203 | |
204 | DEFINE_IDR(btf_idr); |
205 | DEFINE_SPINLOCK(btf_idr_lock); |
206 | |
207 | enum btf_kfunc_hook { |
208 | BTF_KFUNC_HOOK_COMMON, |
209 | BTF_KFUNC_HOOK_XDP, |
210 | BTF_KFUNC_HOOK_TC, |
211 | BTF_KFUNC_HOOK_STRUCT_OPS, |
212 | BTF_KFUNC_HOOK_TRACING, |
213 | BTF_KFUNC_HOOK_SYSCALL, |
214 | BTF_KFUNC_HOOK_FMODRET, |
215 | BTF_KFUNC_HOOK_CGROUP_SKB, |
216 | BTF_KFUNC_HOOK_SCHED_ACT, |
217 | BTF_KFUNC_HOOK_SK_SKB, |
218 | BTF_KFUNC_HOOK_SOCKET_FILTER, |
219 | BTF_KFUNC_HOOK_LWT, |
220 | BTF_KFUNC_HOOK_NETFILTER, |
221 | BTF_KFUNC_HOOK_MAX, |
222 | }; |
223 | |
224 | enum { |
225 | BTF_KFUNC_SET_MAX_CNT = 256, |
226 | BTF_DTOR_KFUNC_MAX_CNT = 256, |
227 | BTF_KFUNC_FILTER_MAX_CNT = 16, |
228 | }; |
229 | |
230 | struct btf_kfunc_hook_filter { |
231 | btf_kfunc_filter_t filters[BTF_KFUNC_FILTER_MAX_CNT]; |
232 | u32 nr_filters; |
233 | }; |
234 | |
235 | struct btf_kfunc_set_tab { |
236 | struct btf_id_set8 *sets[BTF_KFUNC_HOOK_MAX]; |
237 | struct btf_kfunc_hook_filter hook_filters[BTF_KFUNC_HOOK_MAX]; |
238 | }; |
239 | |
240 | struct btf_id_dtor_kfunc_tab { |
241 | u32 cnt; |
242 | struct btf_id_dtor_kfunc dtors[]; |
243 | }; |
244 | |
245 | struct btf_struct_ops_tab { |
246 | u32 cnt; |
247 | u32 capacity; |
248 | struct bpf_struct_ops_desc ops[]; |
249 | }; |
250 | |
251 | struct btf { |
252 | void *data; |
253 | struct btf_type **types; |
254 | u32 *resolved_ids; |
255 | u32 *resolved_sizes; |
256 | const char *strings; |
257 | void *nohdr_data; |
258 | struct btf_header hdr; |
259 | u32 nr_types; /* includes VOID for base BTF */ |
260 | u32 types_size; |
261 | u32 data_size; |
262 | refcount_t refcnt; |
263 | u32 id; |
264 | struct rcu_head rcu; |
265 | struct btf_kfunc_set_tab *kfunc_set_tab; |
266 | struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab; |
267 | struct btf_struct_metas *struct_meta_tab; |
268 | struct btf_struct_ops_tab *struct_ops_tab; |
269 | |
270 | /* split BTF support */ |
271 | struct btf *base_btf; |
272 | u32 start_id; /* first type ID in this BTF (0 for base BTF) */ |
273 | u32 start_str_off; /* first string offset (0 for base BTF) */ |
274 | char name[MODULE_NAME_LEN]; |
275 | bool kernel_btf; |
276 | }; |
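
/*
 * Split BTF example (hypothetical numbers): if the vmlinux (base) BTF
 * carries type IDs [1..N] and string offsets [0..S), a module BTF built
 * on top of it gets start_id = N + 1 and start_str_off = S.  Its own
 * types[] array and strings buffer are then indexed with
 * (type_id - start_id) and (offset - start_str_off); see btf_type_by_id()
 * and btf_str_by_offset() below.
 */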
277 | |
278 | enum verifier_phase { |
279 | CHECK_META, |
280 | CHECK_TYPE, |
281 | }; |
282 | |
283 | struct resolve_vertex { |
284 | const struct btf_type *t; |
285 | u32 type_id; |
286 | u16 next_member; |
287 | }; |
288 | |
289 | enum visit_state { |
290 | NOT_VISITED, |
291 | VISITED, |
292 | RESOLVED, |
293 | }; |
294 | |
295 | enum resolve_mode { |
296 | RESOLVE_TBD, /* To Be Determined */ |
297 | RESOLVE_PTR, /* Resolving for Pointer */ |
298 | RESOLVE_STRUCT_OR_ARRAY, /* Resolving for struct/union |
299 | * or array |
300 | */ |
301 | }; |
302 | |
303 | #define MAX_RESOLVE_DEPTH 32 |
304 | |
305 | struct btf_sec_info { |
306 | u32 off; |
307 | u32 len; |
308 | }; |
309 | |
310 | struct btf_verifier_env { |
311 | struct btf *btf; |
312 | u8 *visit_states; |
313 | struct resolve_vertex stack[MAX_RESOLVE_DEPTH]; |
314 | struct bpf_verifier_log log; |
315 | u32 log_type_id; |
316 | u32 top_stack; |
317 | enum verifier_phase phase; |
318 | enum resolve_mode resolve_mode; |
319 | }; |
320 | |
321 | static const char * const btf_kind_str[NR_BTF_KINDS] = { |
	[BTF_KIND_UNKN]		= "UNKNOWN",
	[BTF_KIND_INT]		= "INT",
	[BTF_KIND_PTR]		= "PTR",
	[BTF_KIND_ARRAY]	= "ARRAY",
	[BTF_KIND_STRUCT]	= "STRUCT",
	[BTF_KIND_UNION]	= "UNION",
	[BTF_KIND_ENUM]		= "ENUM",
	[BTF_KIND_FWD]		= "FWD",
	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
	[BTF_KIND_VOLATILE]	= "VOLATILE",
	[BTF_KIND_CONST]	= "CONST",
	[BTF_KIND_RESTRICT]	= "RESTRICT",
	[BTF_KIND_FUNC]		= "FUNC",
	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
	[BTF_KIND_VAR]		= "VAR",
	[BTF_KIND_DATASEC]	= "DATASEC",
	[BTF_KIND_FLOAT]	= "FLOAT",
	[BTF_KIND_DECL_TAG]	= "DECL_TAG",
	[BTF_KIND_TYPE_TAG]	= "TYPE_TAG",
	[BTF_KIND_ENUM64]	= "ENUM64",
342 | }; |
343 | |
344 | const char *btf_type_str(const struct btf_type *t) |
345 | { |
346 | return btf_kind_str[BTF_INFO_KIND(t->info)]; |
347 | } |
348 | |
349 | /* Chunk size we use in safe copy of data to be shown. */ |
350 | #define BTF_SHOW_OBJ_SAFE_SIZE 32 |
351 | |
352 | /* |
353 | * This is the maximum size of a base type value (equivalent to a |
354 | * 128-bit int); if we are at the end of our safe buffer and have |
355 | * less than 16 bytes space we can't be assured of being able |
356 | * to copy the next type safely, so in such cases we will initiate |
357 | * a new copy. |
358 | */ |
359 | #define BTF_SHOW_OBJ_BASE_TYPE_SIZE 16 |
360 | |
361 | /* Type name size */ |
362 | #define BTF_SHOW_NAME_SIZE 80 |
363 | |
364 | /* |
365 | * The suffix of a type that indicates it cannot alias another type when |
366 | * comparing BTF IDs for kfunc invocations. |
367 | */ |
368 | #define NOCAST_ALIAS_SUFFIX "___init" |
369 | |
370 | /* |
371 | * Common data to all BTF show operations. Private show functions can add |
372 | * their own data to a structure containing a struct btf_show and consult it |
373 | * in the show callback. See btf_type_show() below. |
374 | * |
375 | * One challenge with showing nested data is we want to skip 0-valued |
376 | * data, but in order to figure out whether a nested object is all zeros |
377 | * we need to walk through it. As a result, we need to make two passes |
 * when handling structs, unions and arrays; the first pass simply looks
 * for nonzero data, while the second actually does the display.  The first
380 | * pass is signalled by show->state.depth_check being set, and if we |
381 | * encounter a non-zero value we set show->state.depth_to_show to |
382 | * the depth at which we encountered it. When we have completed the |
383 | * first pass, we will know if anything needs to be displayed if |
384 | * depth_to_show > depth. See btf_[struct,array]_show() for the |
385 | * implementation of this. |
386 | * |
387 | * Another problem is we want to ensure the data for display is safe to |
388 | * access. To support this, the anonymous "struct {} obj" tracks the data |
389 | * object and our safe copy of it. We copy portions of the data needed |
390 | * to the object "copy" buffer, but because its size is limited to |
 * BTF_SHOW_OBJ_SAFE_SIZE bytes, multiple copies may be required as we
392 | * traverse larger objects for display. |
393 | * |
394 | * The various data type show functions all start with a call to |
395 | * btf_show_start_type() which returns a pointer to the safe copy |
396 | * of the data needed (or if BTF_SHOW_UNSAFE is specified, to the |
397 | * raw data itself). btf_show_obj_safe() is responsible for |
398 | * using copy_from_kernel_nofault() to update the safe data if necessary |
399 | * as we traverse the object's data. skbuff-like semantics are |
400 | * used: |
401 | * |
402 | * - obj.head points to the start of the toplevel object for display |
403 | * - obj.size is the size of the toplevel object |
404 | * - obj.data points to the current point in the original data at |
405 | * which our safe data starts. obj.data will advance as we copy |
406 | * portions of the data. |
407 | * |
408 | * In most cases a single copy will suffice, but larger data structures |
 * such as "struct task_struct" will require many copies.
 * btf_show_obj_safe() contains the logic that determines if a new
 * copy_from_kernel_nofault() is needed.
412 | */ |
413 | struct btf_show { |
414 | u64 flags; |
415 | void *target; /* target of show operation (seq file, buffer) */ |
416 | void (*showfn)(struct btf_show *show, const char *fmt, va_list args); |
417 | const struct btf *btf; |
418 | /* below are used during iteration */ |
419 | struct { |
420 | u8 depth; |
421 | u8 depth_to_show; |
422 | u8 depth_check; |
423 | u8 array_member:1, |
424 | array_terminated:1; |
425 | u16 array_encoding; |
426 | u32 type_id; |
427 | int status; /* non-zero for error */ |
428 | const struct btf_type *type; |
429 | const struct btf_member *member; |
430 | char name[BTF_SHOW_NAME_SIZE]; /* space for member name/type */ |
431 | } state; |
432 | struct { |
433 | u32 size; |
434 | void *head; |
435 | void *data; |
436 | u8 safe[BTF_SHOW_OBJ_SAFE_SIZE]; |
437 | } obj; |
438 | }; |
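
/*
 * Sketch of a private show implementation as described in the comment
 * above (the real seq_file and snprintf backends appear later in this
 * file); the names here are purely illustrative:
 *
 *	struct btf_show_example {
 *		struct btf_show show;
 *		int lines;		// private, per-invocation state
 *	};
 *
 *	static void example_showfn(struct btf_show *show, const char *fmt,
 *				   va_list args)
 *	{
 *		struct btf_show_example *ex =
 *			container_of(show, struct btf_show_example, show);
 *
 *		ex->lines++;
 *		vprintk(fmt, args);
 *	}
 */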
439 | |
440 | struct btf_kind_operations { |
441 | s32 (*check_meta)(struct btf_verifier_env *env, |
442 | const struct btf_type *t, |
443 | u32 meta_left); |
444 | int (*resolve)(struct btf_verifier_env *env, |
445 | const struct resolve_vertex *v); |
446 | int (*check_member)(struct btf_verifier_env *env, |
447 | const struct btf_type *struct_type, |
448 | const struct btf_member *member, |
449 | const struct btf_type *member_type); |
450 | int (*check_kflag_member)(struct btf_verifier_env *env, |
451 | const struct btf_type *struct_type, |
452 | const struct btf_member *member, |
453 | const struct btf_type *member_type); |
454 | void (*log_details)(struct btf_verifier_env *env, |
455 | const struct btf_type *t); |
456 | void (*show)(const struct btf *btf, const struct btf_type *t, |
457 | u32 type_id, void *data, u8 bits_offsets, |
458 | struct btf_show *show); |
459 | }; |
460 | |
461 | static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS]; |
462 | static struct btf_type btf_void; |
463 | |
464 | static int btf_resolve(struct btf_verifier_env *env, |
465 | const struct btf_type *t, u32 type_id); |
466 | |
467 | static int btf_func_check(struct btf_verifier_env *env, |
468 | const struct btf_type *t); |
469 | |
470 | static bool btf_type_is_modifier(const struct btf_type *t) |
471 | { |
	/* Some of them are not strictly C modifiers,
	 * but they are grouped into the same bucket
	 * as far as BTF is concerned:
	 *   A type (t) that refers to another
	 *   type through t->type AND whose size cannot
	 *   be determined without following t->type.
	 *
	 * ptr does not fall into this bucket
	 * because its size is always sizeof(void *).
	 */
482 | switch (BTF_INFO_KIND(t->info)) { |
483 | case BTF_KIND_TYPEDEF: |
484 | case BTF_KIND_VOLATILE: |
485 | case BTF_KIND_CONST: |
486 | case BTF_KIND_RESTRICT: |
487 | case BTF_KIND_TYPE_TAG: |
488 | return true; |
489 | } |
490 | |
491 | return false; |
492 | } |
493 | |
494 | bool btf_type_is_void(const struct btf_type *t) |
495 | { |
496 | return t == &btf_void; |
497 | } |
498 | |
499 | static bool btf_type_is_fwd(const struct btf_type *t) |
500 | { |
501 | return BTF_INFO_KIND(t->info) == BTF_KIND_FWD; |
502 | } |
503 | |
504 | static bool btf_type_is_datasec(const struct btf_type *t) |
505 | { |
506 | return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC; |
507 | } |
508 | |
509 | static bool btf_type_is_decl_tag(const struct btf_type *t) |
510 | { |
511 | return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG; |
512 | } |
513 | |
514 | static bool btf_type_nosize(const struct btf_type *t) |
515 | { |
516 | return btf_type_is_void(t) || btf_type_is_fwd(t) || |
517 | btf_type_is_func(t) || btf_type_is_func_proto(t) || |
518 | btf_type_is_decl_tag(t); |
519 | } |
520 | |
521 | static bool btf_type_nosize_or_null(const struct btf_type *t) |
522 | { |
523 | return !t || btf_type_nosize(t); |
524 | } |
525 | |
526 | static bool btf_type_is_decl_tag_target(const struct btf_type *t) |
527 | { |
528 | return btf_type_is_func(t) || btf_type_is_struct(t) || |
529 | btf_type_is_var(t) || btf_type_is_typedef(t); |
530 | } |
531 | |
532 | u32 btf_nr_types(const struct btf *btf) |
533 | { |
534 | u32 total = 0; |
535 | |
536 | while (btf) { |
537 | total += btf->nr_types; |
538 | btf = btf->base_btf; |
539 | } |
540 | |
541 | return total; |
542 | } |
543 | |
544 | s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind) |
545 | { |
546 | const struct btf_type *t; |
547 | const char *tname; |
548 | u32 i, total; |
549 | |
550 | total = btf_nr_types(btf); |
551 | for (i = 1; i < total; i++) { |
		t = btf_type_by_id(btf, i);
		if (BTF_INFO_KIND(t->info) != kind)
			continue;

		tname = btf_name_by_offset(btf, t->name_off);
557 | if (!strcmp(tname, name)) |
558 | return i; |
559 | } |
560 | |
561 | return -ENOENT; |
562 | } |
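
/*
 * Usage sketch (hypothetical caller):
 *
 *	s32 id = btf_find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
 *
 *	if (id < 0)
 *		return id;	// -ENOENT when no such type exists
 */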
563 | |
564 | s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p) |
565 | { |
566 | struct btf *btf; |
567 | s32 ret; |
568 | int id; |
569 | |
570 | btf = bpf_get_btf_vmlinux(); |
	if (IS_ERR(btf))
		return PTR_ERR(btf);
573 | if (!btf) |
574 | return -EINVAL; |
575 | |
576 | ret = btf_find_by_name_kind(btf, name, kind); |
577 | /* ret is never zero, since btf_find_by_name_kind returns |
578 | * positive btf_id or negative error. |
579 | */ |
580 | if (ret > 0) { |
581 | btf_get(btf); |
582 | *btf_p = btf; |
583 | return ret; |
584 | } |
585 | |
586 | /* If name is not found in vmlinux's BTF then search in module's BTFs */ |
	spin_lock_bh(&btf_idr_lock);
588 | idr_for_each_entry(&btf_idr, btf, id) { |
589 | if (!btf_is_module(btf)) |
590 | continue; |
		/* linear search could be slow, hence unlock/lock
		 * the IDR to avoid holding it for too long
		 */
		btf_get(btf);
		spin_unlock_bh(&btf_idr_lock);
596 | ret = btf_find_by_name_kind(btf, name, kind); |
597 | if (ret > 0) { |
598 | *btf_p = btf; |
599 | return ret; |
600 | } |
601 | btf_put(btf); |
		spin_lock_bh(&btf_idr_lock);
	}
	spin_unlock_bh(&btf_idr_lock);
605 | return ret; |
606 | } |
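
/*
 * Usage sketch: on success the caller holds a reference on *btf_p and is
 * responsible for dropping it, e.g.:
 *
 *	struct btf *btf;
 *	s32 id = bpf_find_btf_id("tcp_sock", BTF_KIND_STRUCT, &btf);
 *
 *	if (id > 0) {
 *		// ... use id against btf ...
 *		btf_put(btf);
 *	}
 */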
607 | |
608 | const struct btf_type *btf_type_skip_modifiers(const struct btf *btf, |
609 | u32 id, u32 *res_id) |
610 | { |
	const struct btf_type *t = btf_type_by_id(btf, id);
612 | |
613 | while (btf_type_is_modifier(t)) { |
614 | id = t->type; |
		t = btf_type_by_id(btf, t->type);
616 | } |
617 | |
618 | if (res_id) |
619 | *res_id = id; |
620 | |
621 | return t; |
622 | } |
623 | |
624 | const struct btf_type *btf_type_resolve_ptr(const struct btf *btf, |
625 | u32 id, u32 *res_id) |
626 | { |
627 | const struct btf_type *t; |
628 | |
629 | t = btf_type_skip_modifiers(btf, id, NULL); |
630 | if (!btf_type_is_ptr(t)) |
631 | return NULL; |
632 | |
	return btf_type_skip_modifiers(btf, t->type, res_id);
634 | } |
635 | |
636 | const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf, |
637 | u32 id, u32 *res_id) |
638 | { |
639 | const struct btf_type *ptype; |
640 | |
641 | ptype = btf_type_resolve_ptr(btf, id, res_id); |
	if (ptype && btf_type_is_func_proto(ptype))
643 | return ptype; |
644 | |
645 | return NULL; |
646 | } |
647 | |
648 | /* Types that act only as a source, not sink or intermediate |
649 | * type when resolving. |
650 | */ |
651 | static bool btf_type_is_resolve_source_only(const struct btf_type *t) |
652 | { |
653 | return btf_type_is_var(t) || |
654 | btf_type_is_decl_tag(t) || |
655 | btf_type_is_datasec(t); |
656 | } |
657 | |
658 | /* What types need to be resolved? |
659 | * |
660 | * btf_type_is_modifier() is an obvious one. |
661 | * |
662 | * btf_type_is_struct() because its member refers to |
663 | * another type (through member->type). |
664 | * |
665 | * btf_type_is_var() because the variable refers to |
666 | * another type. btf_type_is_datasec() holds multiple |
667 | * btf_type_is_var() types that need resolving. |
668 | * |
 * btf_type_is_array() because its element (array->type)
 * refers to another type.  An array can be thought of as a
 * special case of a struct where the same member type is
 * repeated array->nelems times.
673 | */ |
674 | static bool btf_type_needs_resolve(const struct btf_type *t) |
675 | { |
676 | return btf_type_is_modifier(t) || |
677 | btf_type_is_ptr(t) || |
678 | btf_type_is_struct(t) || |
679 | btf_type_is_array(t) || |
680 | btf_type_is_var(t) || |
681 | btf_type_is_func(t) || |
682 | btf_type_is_decl_tag(t) || |
683 | btf_type_is_datasec(t); |
684 | } |
685 | |
686 | /* t->size can be used */ |
687 | static bool btf_type_has_size(const struct btf_type *t) |
688 | { |
689 | switch (BTF_INFO_KIND(t->info)) { |
690 | case BTF_KIND_INT: |
691 | case BTF_KIND_STRUCT: |
692 | case BTF_KIND_UNION: |
693 | case BTF_KIND_ENUM: |
694 | case BTF_KIND_DATASEC: |
695 | case BTF_KIND_FLOAT: |
696 | case BTF_KIND_ENUM64: |
697 | return true; |
698 | } |
699 | |
700 | return false; |
701 | } |
702 | |
703 | static const char *btf_int_encoding_str(u8 encoding) |
704 | { |
	if (encoding == 0)
		return "(none)";
	else if (encoding == BTF_INT_SIGNED)
		return "SIGNED";
	else if (encoding == BTF_INT_CHAR)
		return "CHAR";
	else if (encoding == BTF_INT_BOOL)
		return "BOOL";
	else
		return "UNKN";
715 | } |
716 | |
717 | static u32 btf_type_int(const struct btf_type *t) |
718 | { |
719 | return *(u32 *)(t + 1); |
720 | } |
721 | |
722 | static const struct btf_array *btf_type_array(const struct btf_type *t) |
723 | { |
724 | return (const struct btf_array *)(t + 1); |
725 | } |
726 | |
727 | static const struct btf_enum *btf_type_enum(const struct btf_type *t) |
728 | { |
729 | return (const struct btf_enum *)(t + 1); |
730 | } |
731 | |
732 | static const struct btf_var *btf_type_var(const struct btf_type *t) |
733 | { |
734 | return (const struct btf_var *)(t + 1); |
735 | } |
736 | |
737 | static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t) |
738 | { |
739 | return (const struct btf_decl_tag *)(t + 1); |
740 | } |
741 | |
742 | static const struct btf_enum64 *btf_type_enum64(const struct btf_type *t) |
743 | { |
744 | return (const struct btf_enum64 *)(t + 1); |
745 | } |
746 | |
747 | static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t) |
748 | { |
749 | return kind_ops[BTF_INFO_KIND(t->info)]; |
750 | } |
751 | |
752 | static bool btf_name_offset_valid(const struct btf *btf, u32 offset) |
753 | { |
754 | if (!BTF_STR_OFFSET_VALID(offset)) |
755 | return false; |
756 | |
757 | while (offset < btf->start_str_off) |
758 | btf = btf->base_btf; |
759 | |
760 | offset -= btf->start_str_off; |
761 | return offset < btf->hdr.str_len; |
762 | } |
763 | |
764 | static bool __btf_name_char_ok(char c, bool first) |
765 | { |
766 | if ((first ? !isalpha(c) : |
767 | !isalnum(c)) && |
768 | c != '_' && |
769 | c != '.') |
770 | return false; |
771 | return true; |
772 | } |
773 | |
774 | static const char *btf_str_by_offset(const struct btf *btf, u32 offset) |
775 | { |
776 | while (offset < btf->start_str_off) |
777 | btf = btf->base_btf; |
778 | |
779 | offset -= btf->start_str_off; |
780 | if (offset < btf->hdr.str_len) |
781 | return &btf->strings[offset]; |
782 | |
783 | return NULL; |
784 | } |
785 | |
786 | static bool __btf_name_valid(const struct btf *btf, u32 offset) |
787 | { |
788 | /* offset must be valid */ |
789 | const char *src = btf_str_by_offset(btf, offset); |
790 | const char *src_limit; |
791 | |
	if (!__btf_name_char_ok(*src, true))
793 | return false; |
794 | |
795 | /* set a limit on identifier length */ |
796 | src_limit = src + KSYM_NAME_LEN; |
797 | src++; |
798 | while (*src && src < src_limit) { |
		if (!__btf_name_char_ok(*src, false))
800 | return false; |
801 | src++; |
802 | } |
803 | |
804 | return !*src; |
805 | } |
806 | |
807 | static bool btf_name_valid_identifier(const struct btf *btf, u32 offset) |
808 | { |
809 | return __btf_name_valid(btf, offset); |
810 | } |
811 | |
812 | /* Allow any printable character in DATASEC names */ |
813 | static bool btf_name_valid_section(const struct btf *btf, u32 offset) |
814 | { |
815 | /* offset must be valid */ |
816 | const char *src = btf_str_by_offset(btf, offset); |
817 | const char *src_limit; |
818 | |
819 | /* set a limit on identifier length */ |
820 | src_limit = src + KSYM_NAME_LEN; |
821 | src++; |
822 | while (*src && src < src_limit) { |
823 | if (!isprint(*src)) |
824 | return false; |
825 | src++; |
826 | } |
827 | |
828 | return !*src; |
829 | } |
830 | |
831 | static const char *__btf_name_by_offset(const struct btf *btf, u32 offset) |
832 | { |
833 | const char *name; |
834 | |
835 | if (!offset) |
		return "(anon)";

	name = btf_str_by_offset(btf, offset);
	return name ?: "(invalid-name-offset)";
840 | } |
841 | |
842 | const char *btf_name_by_offset(const struct btf *btf, u32 offset) |
843 | { |
844 | return btf_str_by_offset(btf, offset); |
845 | } |
846 | |
847 | const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id) |
848 | { |
849 | while (type_id < btf->start_id) |
850 | btf = btf->base_btf; |
851 | |
852 | type_id -= btf->start_id; |
853 | if (type_id >= btf->nr_types) |
854 | return NULL; |
855 | return btf->types[type_id]; |
856 | } |
857 | EXPORT_SYMBOL_GPL(btf_type_by_id); |
858 | |
859 | /* |
860 | * Regular int is not a bit field and it must be either |
861 | * u8/u16/u32/u64 or __int128. |
862 | */ |
863 | static bool btf_type_int_is_regular(const struct btf_type *t) |
864 | { |
865 | u8 nr_bits, nr_bytes; |
866 | u32 int_data; |
867 | |
868 | int_data = btf_type_int(t); |
869 | nr_bits = BTF_INT_BITS(int_data); |
870 | nr_bytes = BITS_ROUNDUP_BYTES(nr_bits); |
871 | if (BITS_PER_BYTE_MASKED(nr_bits) || |
872 | BTF_INT_OFFSET(int_data) || |
873 | (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) && |
874 | nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) && |
875 | nr_bytes != (2 * sizeof(u64)))) { |
876 | return false; |
877 | } |
878 | |
879 | return true; |
880 | } |
881 | |
882 | /* |
883 | * Check that given struct member is a regular int with expected |
884 | * offset and size. |
885 | */ |
886 | bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, |
887 | const struct btf_member *m, |
888 | u32 expected_offset, u32 expected_size) |
889 | { |
890 | const struct btf_type *t; |
891 | u32 id, int_data; |
892 | u8 nr_bits; |
893 | |
894 | id = m->type; |
	t = btf_type_id_size(btf, &id, NULL);
896 | if (!t || !btf_type_is_int(t)) |
897 | return false; |
898 | |
899 | int_data = btf_type_int(t); |
900 | nr_bits = BTF_INT_BITS(int_data); |
	if (btf_type_kflag(s)) {
902 | u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset); |
903 | u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset); |
904 | |
905 | /* if kflag set, int should be a regular int and |
906 | * bit offset should be at byte boundary. |
907 | */ |
908 | return !bitfield_size && |
909 | BITS_ROUNDUP_BYTES(bit_offset) == expected_offset && |
910 | BITS_ROUNDUP_BYTES(nr_bits) == expected_size; |
911 | } |
912 | |
913 | if (BTF_INT_OFFSET(int_data) || |
914 | BITS_PER_BYTE_MASKED(m->offset) || |
915 | BITS_ROUNDUP_BYTES(m->offset) != expected_offset || |
916 | BITS_PER_BYTE_MASKED(nr_bits) || |
917 | BITS_ROUNDUP_BYTES(nr_bits) != expected_size) |
918 | return false; |
919 | |
920 | return true; |
921 | } |
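
/*
 * Usage sketch (hypothetical map key check): require the first member of
 * a struct to be a plain u32 located at byte offset 0:
 *
 *	const struct btf_member *m = btf_type_member(s);
 *
 *	if (!btf_member_is_reg_int(btf, s, m, 0, sizeof(u32)))
 *		return -EINVAL;
 */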
922 | |
923 | /* Similar to btf_type_skip_modifiers() but does not skip typedefs. */ |
924 | static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf, |
925 | u32 id) |
926 | { |
927 | const struct btf_type *t = btf_type_by_id(btf, id); |
928 | |
929 | while (btf_type_is_modifier(t) && |
930 | BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) { |
931 | t = btf_type_by_id(btf, t->type); |
932 | } |
933 | |
934 | return t; |
935 | } |
936 | |
937 | #define BTF_SHOW_MAX_ITER 10 |
938 | |
939 | #define BTF_KIND_BIT(kind) (1ULL << kind) |
940 | |
941 | /* |
942 | * Populate show->state.name with type name information. |
943 | * Format of type name is |
944 | * |
945 | * [.member_name = ] (type_name) |
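 * e.g. ".parent = (struct task_struct *)" for a pointer member
 * (illustrative output only).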
946 | */ |
947 | static const char *btf_show_name(struct btf_show *show) |
948 | { |
949 | /* BTF_MAX_ITER array suffixes "[]" */ |
	/* BTF_SHOW_MAX_ITER array suffixes "[]" */
	const char *array_suffixes = "[][][][][][][][][][]";
	const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
	/* BTF_SHOW_MAX_ITER pointer suffixes "*" */
	const char *ptr_suffixes = "**********";
	const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
	const char *name = NULL, *prefix = "", *parens = "";
956 | const struct btf_member *m = show->state.member; |
957 | const struct btf_type *t; |
958 | const struct btf_array *array; |
959 | u32 id = show->state.type_id; |
960 | const char *member = NULL; |
961 | bool show_member = false; |
962 | u64 kinds = 0; |
963 | int i; |
964 | |
965 | show->state.name[0] = '\0'; |
966 | |
967 | /* |
968 | * Don't show type name if we're showing an array member; |
	 * in that case we show the array type so we don't need to repeat
	 * ourselves for each member.
	 */
	if (show->state.array_member)
		return "";
974 | |
975 | /* Retrieve member name, if any. */ |
976 | if (m) { |
		member = btf_name_by_offset(show->btf, m->name_off);
978 | show_member = strlen(member) > 0; |
979 | id = m->type; |
980 | } |
981 | |
982 | /* |
983 | * Start with type_id, as we have resolved the struct btf_type * |
984 | * via btf_modifier_show() past the parent typedef to the child |
985 | * struct, int etc it is defined as. In such cases, the type_id |
986 | * still represents the starting type while the struct btf_type * |
987 | * in our show->state points at the resolved type of the typedef. |
988 | */ |
989 | t = btf_type_by_id(show->btf, id); |
990 | if (!t) |
		return "";
992 | |
993 | /* |
994 | * The goal here is to build up the right number of pointer and |
995 | * array suffixes while ensuring the type name for a typedef |
996 | * is represented. Along the way we accumulate a list of |
997 | * BTF kinds we have encountered, since these will inform later |
998 | * display; for example, pointer types will not require an |
999 | * opening "{" for struct, we will just display the pointer value. |
1000 | * |
1001 | * We also want to accumulate the right number of pointer or array |
1002 | * indices in the format string while iterating until we get to |
1003 | * the typedef/pointee/array member target type. |
1004 | * |
1005 | * We start by pointing at the end of pointer and array suffix |
1006 | * strings; as we accumulate pointers and arrays we move the pointer |
1007 | * or array string backwards so it will show the expected number of |
1008 | * '*' or '[]' for the type. BTF_SHOW_MAX_ITER of nesting of pointers |
1009 | * and/or arrays and typedefs are supported as a precaution. |
1010 | * |
	 * We also want to get the typedef name while proceeding to resolve
	 * the type it points to, so that we can add parentheses if it is a
	 * "typedef struct" etc.
1014 | */ |
1015 | for (i = 0; i < BTF_SHOW_MAX_ITER; i++) { |
1016 | |
1017 | switch (BTF_INFO_KIND(t->info)) { |
1018 | case BTF_KIND_TYPEDEF: |
1019 | if (!name) |
				name = btf_name_by_offset(show->btf,
							  t->name_off);
1022 | kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF); |
1023 | id = t->type; |
1024 | break; |
1025 | case BTF_KIND_ARRAY: |
1026 | kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY); |
			parens = "[";
			if (!t)
				return "";
1030 | array = btf_type_array(t); |
1031 | if (array_suffix > array_suffixes) |
1032 | array_suffix -= 2; |
1033 | id = array->type; |
1034 | break; |
1035 | case BTF_KIND_PTR: |
1036 | kinds |= BTF_KIND_BIT(BTF_KIND_PTR); |
1037 | if (ptr_suffix > ptr_suffixes) |
1038 | ptr_suffix -= 1; |
1039 | id = t->type; |
1040 | break; |
1041 | default: |
1042 | id = 0; |
1043 | break; |
1044 | } |
1045 | if (!id) |
1046 | break; |
		t = btf_type_skip_qualifiers(show->btf, id);
1048 | } |
1049 | /* We may not be able to represent this type; bail to be safe */ |
1050 | if (i == BTF_SHOW_MAX_ITER) |
		return "";
1052 | |
1053 | if (!name) |
		name = btf_name_by_offset(show->btf, t->name_off);
1055 | |
1056 | switch (BTF_INFO_KIND(t->info)) { |
1057 | case BTF_KIND_STRUCT: |
1058 | case BTF_KIND_UNION: |
1059 | prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ? |
			 "struct" : "union";
1061 | /* if it's an array of struct/union, parens is already set */ |
1062 | if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY)))) |
			parens = "{";
1064 | break; |
1065 | case BTF_KIND_ENUM: |
1066 | case BTF_KIND_ENUM64: |
		prefix = "enum";
1068 | break; |
1069 | default: |
1070 | break; |
1071 | } |
1072 | |
1073 | /* pointer does not require parens */ |
1074 | if (kinds & BTF_KIND_BIT(BTF_KIND_PTR)) |
		parens = "";
	/* typedef does not require struct/union/enum prefix */
	if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
		prefix = "";

	if (!name)
		name = "";
1082 | |
1083 | /* Even if we don't want type name info, we want parentheses etc */ |
1084 | if (show->flags & BTF_SHOW_NONAME) |
		snprintf(show->state.name, sizeof(show->state.name), "%s",
			 parens);
	else
		snprintf(show->state.name, sizeof(show->state.name),
			 "%s%s%s(%s%s%s%s%s%s)%s",
			 /* first 3 strings comprise ".member = " */
			 show_member ? "." : "",
			 show_member ? member : "",
			 show_member ? " = " : "",
			 /* ...next is our prefix (struct, enum, etc) */
			 prefix,
			 strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
			 /* ...this is the type name itself */
			 name,
			 /* ...suffixed by the appropriate '*', '[]' suffixes */
			 strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
			 array_suffix, parens);
1102 | |
1103 | return show->state.name; |
1104 | } |
1105 | |
1106 | static const char *__btf_show_indent(struct btf_show *show) |
1107 | { |
1108 | const char *indents = " " ; |
1109 | const char *indent = &indents[strlen(indents)]; |
1110 | |
1111 | if ((indent - show->state.depth) >= indents) |
1112 | return indent - show->state.depth; |
1113 | return indents; |
1114 | } |
1115 | |
1116 | static const char *btf_show_indent(struct btf_show *show) |
1117 | { |
1118 | return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show); |
1119 | } |
1120 | |
1121 | static const char *btf_show_newline(struct btf_show *show) |
1122 | { |
	return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
1124 | } |
1125 | |
1126 | static const char *btf_show_delim(struct btf_show *show) |
1127 | { |
1128 | if (show->state.depth == 0) |
		return "";

	if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
	    BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
		return "|";

	return ",";
1136 | } |
1137 | |
1138 | __printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...) |
1139 | { |
1140 | va_list args; |
1141 | |
1142 | if (!show->state.depth_check) { |
1143 | va_start(args, fmt); |
1144 | show->showfn(show, fmt, args); |
1145 | va_end(args); |
1146 | } |
1147 | } |
1148 | |
1149 | /* Macros are used here as btf_show_type_value[s]() prepends and appends |
1150 | * format specifiers to the format specifier passed in; these do the work of |
1151 | * adding indentation, delimiters etc while the caller simply has to specify |
1152 | * the type value(s) in the format specifier + value(s). |
1153 | */ |
1154 | #define btf_show_type_value(show, fmt, value) \ |
1155 | do { \ |
1156 | if ((value) != (__typeof__(value))0 || \ |
1157 | (show->flags & BTF_SHOW_ZERO) || \ |
1158 | show->state.depth == 0) { \ |
1159 | btf_show(show, "%s%s" fmt "%s%s", \ |
1160 | btf_show_indent(show), \ |
1161 | btf_show_name(show), \ |
1162 | value, btf_show_delim(show), \ |
1163 | btf_show_newline(show)); \ |
1164 | if (show->state.depth > show->state.depth_to_show) \ |
1165 | show->state.depth_to_show = show->state.depth; \ |
1166 | } \ |
1167 | } while (0) |
1168 | |
1169 | #define btf_show_type_values(show, fmt, ...) \ |
1170 | do { \ |
1171 | btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show), \ |
1172 | btf_show_name(show), \ |
1173 | __VA_ARGS__, btf_show_delim(show), \ |
1174 | btf_show_newline(show)); \ |
1175 | if (show->state.depth > show->state.depth_to_show) \ |
1176 | show->state.depth_to_show = show->state.depth; \ |
1177 | } while (0) |
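
/*
 * Usage sketch, mirroring how the int/enum show helpers later in this
 * file use these macros:
 *
 *	btf_show_type_value(show, "%d", *(int *)safe_data);
 *
 * which, for a non-zero value, emits something like ".refcnt = (int)42,"
 * with indentation, member/type name, delimiter and newline all supplied
 * by the macro (illustrative output only).
 */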
1178 | |
1179 | /* How much is left to copy to safe buffer after @data? */ |
1180 | static int btf_show_obj_size_left(struct btf_show *show, void *data) |
1181 | { |
1182 | return show->obj.head + show->obj.size - data; |
1183 | } |
1184 | |
1185 | /* Is object pointed to by @data of @size already copied to our safe buffer? */ |
1186 | static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size) |
1187 | { |
1188 | return data >= show->obj.data && |
1189 | (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE); |
1190 | } |
1191 | |
1192 | /* |
1193 | * If object pointed to by @data of @size falls within our safe buffer, return |
1194 | * the equivalent pointer to the same safe data. Assumes |
1195 | * copy_from_kernel_nofault() has already happened and our safe buffer is |
1196 | * populated. |
1197 | */ |
1198 | static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size) |
1199 | { |
1200 | if (btf_show_obj_is_safe(show, data, size)) |
1201 | return show->obj.safe + (data - show->obj.data); |
1202 | return NULL; |
1203 | } |
1204 | |
1205 | /* |
1206 | * Return a safe-to-access version of data pointed to by @data. |
1207 | * We do this by copying the relevant amount of information |
1208 | * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault(). |
1209 | * |
1210 | * If BTF_SHOW_UNSAFE is specified, just return data as-is; no |
1211 | * safe copy is needed. |
1212 | * |
1213 | * Otherwise we need to determine if we have the required amount |
1214 | * of data (determined by the @data pointer and the size of the |
1215 | * largest base type we can encounter (represented by |
 * BTF_SHOW_OBJ_BASE_TYPE_SIZE)).  Having that much data ensures
1217 | * that we will be able to print some of the current object, |
1218 | * and if more is needed a copy will be triggered. |
1219 | * Some objects such as structs will not fit into the buffer; |
1220 | * in such cases additional copies when we iterate over their |
1221 | * members may be needed. |
1222 | * |
1223 | * btf_show_obj_safe() is used to return a safe buffer for |
1224 | * btf_show_start_type(); this ensures that as we recurse into |
1225 | * nested types we always have safe data for the given type. |
1226 | * This approach is somewhat wasteful; it's possible for example |
1227 | * that when iterating over a large union we'll end up copying the |
1228 | * same data repeatedly, but the goal is safety not performance. |
1229 | * We use stack data as opposed to per-CPU buffers because the |
1230 | * iteration over a type can take some time, and preemption handling |
1231 | * would greatly complicate use of the safe buffer. |
1232 | */ |
1233 | static void *btf_show_obj_safe(struct btf_show *show, |
1234 | const struct btf_type *t, |
1235 | void *data) |
1236 | { |
1237 | const struct btf_type *rt; |
1238 | int size_left, size; |
1239 | void *safe = NULL; |
1240 | |
1241 | if (show->flags & BTF_SHOW_UNSAFE) |
1242 | return data; |
1243 | |
	rt = btf_resolve_size(show->btf, t, &size);
	if (IS_ERR(rt)) {
		show->state.status = PTR_ERR(rt);
1247 | return NULL; |
1248 | } |
1249 | |
1250 | /* |
1251 | * Is this toplevel object? If so, set total object size and |
1252 | * initialize pointers. Otherwise check if we still fall within |
1253 | * our safe object data. |
1254 | */ |
1255 | if (show->state.depth == 0) { |
1256 | show->obj.size = size; |
1257 | show->obj.head = data; |
1258 | } else { |
1259 | /* |
1260 | * If the size of the current object is > our remaining |
1261 | * safe buffer we _may_ need to do a new copy. However |
		 * consider the case of a nested struct; its size pushes
1263 | * us over the safe buffer limit, but showing any individual |
1264 | * struct members does not. In such cases, we don't need |
1265 | * to initiate a fresh copy yet; however we definitely need |
1266 | * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left |
1267 | * in our buffer, regardless of the current object size. |
1268 | * The logic here is that as we resolve types we will |
1269 | * hit a base type at some point, and we need to be sure |
1270 | * the next chunk of data is safely available to display |
1271 | * that type info safely. We cannot rely on the size of |
1272 | * the current object here because it may be much larger |
1273 | * than our current buffer (e.g. task_struct is 8k). |
1274 | * All we want to do here is ensure that we can print the |
1275 | * next basic type, which we can if either |
1276 | * - the current type size is within the safe buffer; or |
1277 | * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in |
1278 | * the safe buffer. |
1279 | */ |
1280 | safe = __btf_show_obj_safe(show, data, |
1281 | min(size, |
1282 | BTF_SHOW_OBJ_BASE_TYPE_SIZE)); |
1283 | } |
1284 | |
1285 | /* |
1286 | * We need a new copy to our safe object, either because we haven't |
1287 | * yet copied and are initializing safe data, or because the data |
1288 | * we want falls outside the boundaries of the safe object. |
1289 | */ |
1290 | if (!safe) { |
1291 | size_left = btf_show_obj_size_left(show, data); |
1292 | if (size_left > BTF_SHOW_OBJ_SAFE_SIZE) |
1293 | size_left = BTF_SHOW_OBJ_SAFE_SIZE; |
		show->state.status = copy_from_kernel_nofault(show->obj.safe,
							      data, size_left);
1296 | if (!show->state.status) { |
1297 | show->obj.data = data; |
1298 | safe = show->obj.safe; |
1299 | } |
1300 | } |
1301 | |
1302 | return safe; |
1303 | } |
1304 | |
1305 | /* |
1306 | * Set the type we are starting to show and return a safe data pointer |
1307 | * to be used for showing the associated data. |
1308 | */ |
1309 | static void *btf_show_start_type(struct btf_show *show, |
1310 | const struct btf_type *t, |
1311 | u32 type_id, void *data) |
1312 | { |
1313 | show->state.type = t; |
1314 | show->state.type_id = type_id; |
1315 | show->state.name[0] = '\0'; |
1316 | |
1317 | return btf_show_obj_safe(show, t, data); |
1318 | } |
1319 | |
1320 | static void btf_show_end_type(struct btf_show *show) |
1321 | { |
1322 | show->state.type = NULL; |
1323 | show->state.type_id = 0; |
1324 | show->state.name[0] = '\0'; |
1325 | } |
1326 | |
1327 | static void *btf_show_start_aggr_type(struct btf_show *show, |
1328 | const struct btf_type *t, |
1329 | u32 type_id, void *data) |
1330 | { |
1331 | void *safe_data = btf_show_start_type(show, t, type_id, data); |
1332 | |
1333 | if (!safe_data) |
1334 | return safe_data; |
1335 | |
	btf_show(show, "%s%s%s", btf_show_indent(show),
1337 | btf_show_name(show), |
1338 | btf_show_newline(show)); |
1339 | show->state.depth++; |
1340 | return safe_data; |
1341 | } |
1342 | |
1343 | static void btf_show_end_aggr_type(struct btf_show *show, |
1344 | const char *suffix) |
1345 | { |
1346 | show->state.depth--; |
	btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
1348 | btf_show_delim(show), btf_show_newline(show)); |
1349 | btf_show_end_type(show); |
1350 | } |
1351 | |
1352 | static void btf_show_start_member(struct btf_show *show, |
1353 | const struct btf_member *m) |
1354 | { |
1355 | show->state.member = m; |
1356 | } |
1357 | |
1358 | static void btf_show_start_array_member(struct btf_show *show) |
1359 | { |
1360 | show->state.array_member = 1; |
1361 | btf_show_start_member(show, NULL); |
1362 | } |
1363 | |
1364 | static void btf_show_end_member(struct btf_show *show) |
1365 | { |
1366 | show->state.member = NULL; |
1367 | } |
1368 | |
1369 | static void btf_show_end_array_member(struct btf_show *show) |
1370 | { |
1371 | show->state.array_member = 0; |
1372 | btf_show_end_member(show); |
1373 | } |
1374 | |
1375 | static void *btf_show_start_array_type(struct btf_show *show, |
1376 | const struct btf_type *t, |
1377 | u32 type_id, |
1378 | u16 array_encoding, |
1379 | void *data) |
1380 | { |
1381 | show->state.array_encoding = array_encoding; |
1382 | show->state.array_terminated = 0; |
1383 | return btf_show_start_aggr_type(show, t, type_id, data); |
1384 | } |
1385 | |
1386 | static void btf_show_end_array_type(struct btf_show *show) |
1387 | { |
1388 | show->state.array_encoding = 0; |
1389 | show->state.array_terminated = 0; |
	btf_show_end_aggr_type(show, "]");
1391 | } |
1392 | |
1393 | static void *btf_show_start_struct_type(struct btf_show *show, |
1394 | const struct btf_type *t, |
1395 | u32 type_id, |
1396 | void *data) |
1397 | { |
1398 | return btf_show_start_aggr_type(show, t, type_id, data); |
1399 | } |
1400 | |
1401 | static void btf_show_end_struct_type(struct btf_show *show) |
1402 | { |
	btf_show_end_aggr_type(show, "}");
1404 | } |
1405 | |
1406 | __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log, |
1407 | const char *fmt, ...) |
1408 | { |
1409 | va_list args; |
1410 | |
1411 | va_start(args, fmt); |
1412 | bpf_verifier_vlog(log, fmt, args); |
1413 | va_end(args); |
1414 | } |
1415 | |
1416 | __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env, |
1417 | const char *fmt, ...) |
1418 | { |
1419 | struct bpf_verifier_log *log = &env->log; |
1420 | va_list args; |
1421 | |
1422 | if (!bpf_verifier_log_needed(log)) |
1423 | return; |
1424 | |
1425 | va_start(args, fmt); |
1426 | bpf_verifier_vlog(log, fmt, args); |
1427 | va_end(args); |
1428 | } |
1429 | |
1430 | __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env, |
1431 | const struct btf_type *t, |
1432 | bool log_details, |
1433 | const char *fmt, ...) |
1434 | { |
1435 | struct bpf_verifier_log *log = &env->log; |
1436 | struct btf *btf = env->btf; |
1437 | va_list args; |
1438 | |
1439 | if (!bpf_verifier_log_needed(log)) |
1440 | return; |
1441 | |
1442 | if (log->level == BPF_LOG_KERNEL) { |
1443 | /* btf verifier prints all types it is processing via |
1444 | * btf_verifier_log_type(..., fmt = NULL). |
1445 | * Skip those prints for in-kernel BTF verification. |
1446 | */ |
1447 | if (!fmt) |
1448 | return; |
1449 | |
1450 | /* Skip logging when loading module BTF with mismatches permitted */ |
1451 | if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) |
1452 | return; |
1453 | } |
1454 | |
	__btf_verifier_log(log, "[%u] %s %s%s",
			   env->log_type_id,
			   btf_type_str(t),
			   __btf_name_by_offset(btf, t->name_off),
			   log_details ? " " : "");
1460 | |
1461 | if (log_details) |
1462 | btf_type_ops(t)->log_details(env, t); |
1463 | |
1464 | if (fmt && *fmt) { |
		__btf_verifier_log(log, " ");
1466 | va_start(args, fmt); |
1467 | bpf_verifier_vlog(log, fmt, args); |
1468 | va_end(args); |
1469 | } |
1470 | |
	__btf_verifier_log(log, "\n");
1472 | } |
1473 | |
1474 | #define btf_verifier_log_type(env, t, ...) \ |
1475 | __btf_verifier_log_type((env), (t), true, __VA_ARGS__) |
1476 | #define btf_verifier_log_basic(env, t, ...) \ |
1477 | __btf_verifier_log_type((env), (t), false, __VA_ARGS__) |
1478 | |
1479 | __printf(4, 5) |
1480 | static void btf_verifier_log_member(struct btf_verifier_env *env, |
1481 | const struct btf_type *struct_type, |
1482 | const struct btf_member *member, |
1483 | const char *fmt, ...) |
1484 | { |
1485 | struct bpf_verifier_log *log = &env->log; |
1486 | struct btf *btf = env->btf; |
1487 | va_list args; |
1488 | |
1489 | if (!bpf_verifier_log_needed(log)) |
1490 | return; |
1491 | |
1492 | if (log->level == BPF_LOG_KERNEL) { |
1493 | if (!fmt) |
1494 | return; |
1495 | |
1496 | /* Skip logging when loading module BTF with mismatches permitted */ |
1497 | if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) |
1498 | return; |
1499 | } |
1500 | |
1501 | /* The CHECK_META phase already did a btf dump. |
1502 | * |
	 * If a member is logged again, it must have hit an error while
	 * parsing this member.  It is useful to print out which
1505 | * struct this member belongs to. |
1506 | */ |
1507 | if (env->phase != CHECK_META) |
1508 | btf_verifier_log_type(env, struct_type, NULL); |
1509 | |
	if (btf_type_kflag(struct_type))
		__btf_verifier_log(log,
				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type,
				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
				   BTF_MEMBER_BIT_OFFSET(member->offset));
	else
		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type, member->offset);
1521 | |
1522 | if (fmt && *fmt) { |
		__btf_verifier_log(log, " ");
1524 | va_start(args, fmt); |
1525 | bpf_verifier_vlog(log, fmt, args); |
1526 | va_end(args); |
1527 | } |
1528 | |
	__btf_verifier_log(log, "\n");
1530 | } |
1531 | |
1532 | __printf(4, 5) |
1533 | static void btf_verifier_log_vsi(struct btf_verifier_env *env, |
1534 | const struct btf_type *datasec_type, |
1535 | const struct btf_var_secinfo *vsi, |
1536 | const char *fmt, ...) |
1537 | { |
1538 | struct bpf_verifier_log *log = &env->log; |
1539 | va_list args; |
1540 | |
1541 | if (!bpf_verifier_log_needed(log)) |
1542 | return; |
1543 | if (log->level == BPF_LOG_KERNEL && !fmt) |
1544 | return; |
1545 | if (env->phase != CHECK_META) |
1546 | btf_verifier_log_type(env, datasec_type, NULL); |
1547 | |
	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
1549 | vsi->type, vsi->offset, vsi->size); |
1550 | if (fmt && *fmt) { |
		__btf_verifier_log(log, " ");
1552 | va_start(args, fmt); |
1553 | bpf_verifier_vlog(log, fmt, args); |
1554 | va_end(args); |
1555 | } |
1556 | |
	__btf_verifier_log(log, "\n");
1558 | } |
1559 | |
1560 | static void btf_verifier_log_hdr(struct btf_verifier_env *env, |
1561 | u32 btf_data_size) |
1562 | { |
1563 | struct bpf_verifier_log *log = &env->log; |
1564 | const struct btf *btf = env->btf; |
1565 | const struct btf_header *hdr; |
1566 | |
1567 | if (!bpf_verifier_log_needed(log)) |
1568 | return; |
1569 | |
1570 | if (log->level == BPF_LOG_KERNEL) |
1571 | return; |
1572 | hdr = &btf->hdr; |
	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
	__btf_verifier_log(log, "version: %u\n", hdr->version);
	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
1582 | } |
1583 | |
1584 | static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t) |
1585 | { |
1586 | struct btf *btf = env->btf; |
1587 | |
1588 | if (btf->types_size == btf->nr_types) { |
1589 | /* Expand 'types' array */ |
1590 | |
1591 | struct btf_type **new_types; |
1592 | u32 expand_by, new_size; |
1593 | |
1594 | if (btf->start_id + btf->types_size == BTF_MAX_TYPE) { |
			btf_verifier_log(env, "Exceeded max num of types");
1596 | return -E2BIG; |
1597 | } |
1598 | |
1599 | expand_by = max_t(u32, btf->types_size >> 2, 16); |
1600 | new_size = min_t(u32, BTF_MAX_TYPE, |
1601 | btf->types_size + expand_by); |
1602 | |
		new_types = kvcalloc(new_size, sizeof(*new_types),
1604 | GFP_KERNEL | __GFP_NOWARN); |
1605 | if (!new_types) |
1606 | return -ENOMEM; |
1607 | |
1608 | if (btf->nr_types == 0) { |
1609 | if (!btf->base_btf) { |
1610 | /* lazily init VOID type */ |
1611 | new_types[0] = &btf_void; |
1612 | btf->nr_types++; |
1613 | } |
1614 | } else { |
1615 | memcpy(new_types, btf->types, |
1616 | sizeof(*btf->types) * btf->nr_types); |
1617 | } |
1618 | |
		kvfree(btf->types);
1620 | btf->types = new_types; |
1621 | btf->types_size = new_size; |
1622 | } |
1623 | |
1624 | btf->types[btf->nr_types++] = t; |
1625 | |
1626 | return 0; |
1627 | } |
1628 | |
1629 | static int btf_alloc_id(struct btf *btf) |
1630 | { |
1631 | int id; |
1632 | |
1633 | idr_preload(GFP_KERNEL); |
	spin_lock_bh(&btf_idr_lock);
	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		btf->id = id;
	spin_unlock_bh(&btf_idr_lock);
1639 | idr_preload_end(); |
1640 | |
1641 | if (WARN_ON_ONCE(!id)) |
1642 | return -ENOSPC; |
1643 | |
1644 | return id > 0 ? 0 : id; |
1645 | } |
1646 | |
1647 | static void btf_free_id(struct btf *btf) |
1648 | { |
1649 | unsigned long flags; |
1650 | |
1651 | /* |
1652 | * In map-in-map, calling map_delete_elem() on outer |
1653 | * map will call bpf_map_put on the inner map. |
1654 | * It will then eventually call btf_free_id() |
	 * on the inner map. Some map_delete_elem()
	 * implementations may run with IRQs disabled, so
	 * we need to use the _irqsave() version instead
	 * of the _bh() version.
1659 | */ |
1660 | spin_lock_irqsave(&btf_idr_lock, flags); |
	idr_remove(&btf_idr, btf->id);
	spin_unlock_irqrestore(&btf_idr_lock, flags);
1663 | } |
1664 | |
1665 | static void btf_free_kfunc_set_tab(struct btf *btf) |
1666 | { |
1667 | struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab; |
1668 | int hook; |
1669 | |
1670 | if (!tab) |
1671 | return; |
1672 | /* For module BTF, we directly assign the sets being registered, so |
1673 | * there is nothing to free except kfunc_set_tab. |
1674 | */ |
1675 | if (btf_is_module(btf)) |
1676 | goto free_tab; |
1677 | for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++) |
		kfree(tab->sets[hook]);
free_tab:
	kfree(tab);
1681 | btf->kfunc_set_tab = NULL; |
1682 | } |
1683 | |
1684 | static void btf_free_dtor_kfunc_tab(struct btf *btf) |
1685 | { |
1686 | struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab; |
1687 | |
1688 | if (!tab) |
1689 | return; |
	kfree(tab);
1691 | btf->dtor_kfunc_tab = NULL; |
1692 | } |
1693 | |
1694 | static void btf_struct_metas_free(struct btf_struct_metas *tab) |
1695 | { |
1696 | int i; |
1697 | |
1698 | if (!tab) |
1699 | return; |
1700 | for (i = 0; i < tab->cnt; i++) |
		btf_record_free(tab->types[i].record);
	kfree(tab);
1703 | } |
1704 | |
1705 | static void btf_free_struct_meta_tab(struct btf *btf) |
1706 | { |
1707 | struct btf_struct_metas *tab = btf->struct_meta_tab; |
1708 | |
1709 | btf_struct_metas_free(tab); |
1710 | btf->struct_meta_tab = NULL; |
1711 | } |
1712 | |
1713 | static void btf_free_struct_ops_tab(struct btf *btf) |
1714 | { |
1715 | struct btf_struct_ops_tab *tab = btf->struct_ops_tab; |
1716 | u32 i; |
1717 | |
1718 | if (!tab) |
1719 | return; |
1720 | |
1721 | for (i = 0; i < tab->cnt; i++) |
		bpf_struct_ops_desc_release(&tab->ops[i]);
1723 | |
	kfree(tab);
1725 | btf->struct_ops_tab = NULL; |
1726 | } |
1727 | |
1728 | static void btf_free(struct btf *btf) |
1729 | { |
1730 | btf_free_struct_meta_tab(btf); |
1731 | btf_free_dtor_kfunc_tab(btf); |
1732 | btf_free_kfunc_set_tab(btf); |
1733 | btf_free_struct_ops_tab(btf); |
	kvfree(btf->types);
	kvfree(btf->resolved_sizes);
	kvfree(btf->resolved_ids);
	kvfree(btf->data);
	kfree(btf);
1739 | } |
1740 | |
1741 | static void btf_free_rcu(struct rcu_head *rcu) |
1742 | { |
1743 | struct btf *btf = container_of(rcu, struct btf, rcu); |
1744 | |
1745 | btf_free(btf); |
1746 | } |
1747 | |
1748 | const char *btf_get_name(const struct btf *btf) |
1749 | { |
1750 | return btf->name; |
1751 | } |
1752 | |
1753 | void btf_get(struct btf *btf) |
1754 | { |
	refcount_inc(&btf->refcnt);
1756 | } |
1757 | |
1758 | void btf_put(struct btf *btf) |
1759 | { |
	if (btf && refcount_dec_and_test(&btf->refcnt)) {
1761 | btf_free_id(btf); |
		call_rcu(&btf->rcu, btf_free_rcu);
1763 | } |
1764 | } |
1765 | |
1766 | static int env_resolve_init(struct btf_verifier_env *env) |
1767 | { |
1768 | struct btf *btf = env->btf; |
1769 | u32 nr_types = btf->nr_types; |
1770 | u32 *resolved_sizes = NULL; |
1771 | u32 *resolved_ids = NULL; |
1772 | u8 *visit_states = NULL; |
1773 | |
	resolved_sizes = kvcalloc(nr_types, sizeof(*resolved_sizes),
1775 | GFP_KERNEL | __GFP_NOWARN); |
1776 | if (!resolved_sizes) |
1777 | goto nomem; |
1778 | |
	resolved_ids = kvcalloc(nr_types, sizeof(*resolved_ids),
1780 | GFP_KERNEL | __GFP_NOWARN); |
1781 | if (!resolved_ids) |
1782 | goto nomem; |
1783 | |
	visit_states = kvcalloc(nr_types, sizeof(*visit_states),
1785 | GFP_KERNEL | __GFP_NOWARN); |
1786 | if (!visit_states) |
1787 | goto nomem; |
1788 | |
1789 | btf->resolved_sizes = resolved_sizes; |
1790 | btf->resolved_ids = resolved_ids; |
1791 | env->visit_states = visit_states; |
1792 | |
1793 | return 0; |
1794 | |
1795 | nomem: |
	kvfree(resolved_sizes);
	kvfree(resolved_ids);
	kvfree(visit_states);
1799 | return -ENOMEM; |
1800 | } |
1801 | |
1802 | static void btf_verifier_env_free(struct btf_verifier_env *env) |
1803 | { |
	kvfree(env->visit_states);
	kfree(env);
1806 | } |
1807 | |
1808 | static bool env_type_is_resolve_sink(const struct btf_verifier_env *env, |
1809 | const struct btf_type *next_type) |
1810 | { |
1811 | switch (env->resolve_mode) { |
1812 | case RESOLVE_TBD: |
1813 | /* int, enum or void is a sink */ |
		return !btf_type_needs_resolve(next_type);
1815 | case RESOLVE_PTR: |
1816 | /* int, enum, void, struct, array, func or func_proto is a sink |
1817 | * for ptr |
1818 | */ |
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_ptr(next_type);
1821 | case RESOLVE_STRUCT_OR_ARRAY: |
1822 | /* int, enum, void, ptr, func or func_proto is a sink |
1823 | * for struct and array |
1824 | */ |
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_array(next_type) &&
		       !btf_type_is_struct(next_type);
1828 | default: |
1829 | BUG(); |
1830 | } |
1831 | } |
1832 | |
1833 | static bool env_type_is_resolved(const struct btf_verifier_env *env, |
1834 | u32 type_id) |
1835 | { |
1836 | /* base BTF types should be resolved by now */ |
1837 | if (type_id < env->btf->start_id) |
1838 | return true; |
1839 | |
1840 | return env->visit_states[type_id - env->btf->start_id] == RESOLVED; |
1841 | } |
1842 | |
1843 | static int env_stack_push(struct btf_verifier_env *env, |
1844 | const struct btf_type *t, u32 type_id) |
1845 | { |
1846 | const struct btf *btf = env->btf; |
1847 | struct resolve_vertex *v; |
1848 | |
1849 | if (env->top_stack == MAX_RESOLVE_DEPTH) |
1850 | return -E2BIG; |
1851 | |
1852 | if (type_id < btf->start_id |
1853 | || env->visit_states[type_id - btf->start_id] != NOT_VISITED) |
1854 | return -EEXIST; |
1855 | |
1856 | env->visit_states[type_id - btf->start_id] = VISITED; |
1857 | |
1858 | v = &env->stack[env->top_stack++]; |
1859 | v->t = t; |
1860 | v->type_id = type_id; |
1861 | v->next_member = 0; |
1862 | |
1863 | if (env->resolve_mode == RESOLVE_TBD) { |
1864 | if (btf_type_is_ptr(t)) |
1865 | env->resolve_mode = RESOLVE_PTR; |
1866 | else if (btf_type_is_struct(t) || btf_type_is_array(t)) |
1867 | env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY; |
1868 | } |
1869 | |
1870 | return 0; |
1871 | } |
1872 | |
1873 | static void env_stack_set_next_member(struct btf_verifier_env *env, |
1874 | u16 next_member) |
1875 | { |
1876 | env->stack[env->top_stack - 1].next_member = next_member; |
1877 | } |
1878 | |
1879 | static void env_stack_pop_resolved(struct btf_verifier_env *env, |
1880 | u32 resolved_type_id, |
1881 | u32 resolved_size) |
1882 | { |
1883 | u32 type_id = env->stack[--(env->top_stack)].type_id; |
1884 | struct btf *btf = env->btf; |
1885 | |
1886 | type_id -= btf->start_id; /* adjust to local type id */ |
1887 | btf->resolved_sizes[type_id] = resolved_size; |
1888 | btf->resolved_ids[type_id] = resolved_type_id; |
1889 | env->visit_states[type_id] = RESOLVED; |
1890 | } |
1891 | |
1892 | static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env) |
1893 | { |
1894 | return env->top_stack ? &env->stack[env->top_stack - 1] : NULL; |
1895 | } |
1896 | |
1897 | /* Resolve the size of a passed-in "type" |
1898 | * |
1899 | * type: is an array (e.g. u32 array[x][y]) |
1900 | * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY, |
1901 | * *type_size: (x * y * sizeof(u32)). Hence, *type_size always |
1902 | * corresponds to the return type. |
1903 | * *elem_type: u32 |
1904 | * *elem_id: id of u32 |
1905 | * *total_nelems: (x * y). Hence, individual elem size is |
1906 | * (*type_size / *total_nelems) |
1907 | * *type_id: id of type if it's changed within the function, 0 if not |
1908 | * |
1909 | * type: is not an array (e.g. const struct X) |
1910 | * return type: type "struct X" |
1911 | * *type_size: sizeof(struct X) |
1912 | * *elem_type: same as return type ("struct X") |
1913 | * *elem_id: 0 |
1914 | * *total_nelems: 1 |
1915 | * *type_id: id of type if it's changed within the function, 0 if not |
1916 | */ |
1917 | static const struct btf_type * |
1918 | __btf_resolve_size(const struct btf *btf, const struct btf_type *type, |
1919 | u32 *type_size, const struct btf_type **elem_type, |
1920 | u32 *elem_id, u32 *total_nelems, u32 *type_id) |
1921 | { |
1922 | const struct btf_type *array_type = NULL; |
1923 | const struct btf_array *array = NULL; |
1924 | u32 i, size, nelems = 1, id = 0; |
1925 | |
1926 | for (i = 0; i < MAX_RESOLVE_DEPTH; i++) { |
1927 | switch (BTF_INFO_KIND(type->info)) { |
1928 | /* type->size can be used */ |
1929 | case BTF_KIND_INT: |
1930 | case BTF_KIND_STRUCT: |
1931 | case BTF_KIND_UNION: |
1932 | case BTF_KIND_ENUM: |
1933 | case BTF_KIND_FLOAT: |
1934 | case BTF_KIND_ENUM64: |
1935 | size = type->size; |
1936 | goto resolved; |
1937 | |
1938 | case BTF_KIND_PTR: |
1939 | size = sizeof(void *); |
1940 | goto resolved; |
1941 | |
1942 | /* Modifiers */ |
1943 | case BTF_KIND_TYPEDEF: |
1944 | case BTF_KIND_VOLATILE: |
1945 | case BTF_KIND_CONST: |
1946 | case BTF_KIND_RESTRICT: |
1947 | case BTF_KIND_TYPE_TAG: |
1948 | id = type->type; |
1949 | type = btf_type_by_id(btf, type->type); |
1950 | break; |
1951 | |
1952 | case BTF_KIND_ARRAY: |
1953 | if (!array_type) |
1954 | array_type = type; |
			array = btf_type_array(type);
			if (nelems && array->nelems > U32_MAX / nelems)
				return ERR_PTR(-EINVAL);
1958 | nelems *= array->nelems; |
1959 | type = btf_type_by_id(btf, array->type); |
1960 | break; |
1961 | |
1962 | /* type without size */ |
1963 | default: |
			return ERR_PTR(-EINVAL);
1965 | } |
1966 | } |
1967 | |
	return ERR_PTR(-EINVAL);
1969 | |
1970 | resolved: |
1971 | if (nelems && size > U32_MAX / nelems) |
		return ERR_PTR(-EINVAL);
1973 | |
1974 | *type_size = nelems * size; |
1975 | if (total_nelems) |
1976 | *total_nelems = nelems; |
1977 | if (elem_type) |
1978 | *elem_type = type; |
1979 | if (elem_id) |
1980 | *elem_id = array ? array->type : 0; |
1981 | if (type_id && id) |
1982 | *type_id = id; |
1983 | |
1984 | return array_type ? : type; |
1985 | } |
1986 | |
1987 | const struct btf_type * |
1988 | btf_resolve_size(const struct btf *btf, const struct btf_type *type, |
1989 | u32 *type_size) |
1990 | { |
1991 | return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL); |
1992 | } |
1993 | |
1994 | static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id) |
1995 | { |
1996 | while (type_id < btf->start_id) |
1997 | btf = btf->base_btf; |
1998 | |
1999 | return btf->resolved_ids[type_id - btf->start_id]; |
2000 | } |
2001 | |
2002 | /* The input param "type_id" must point to a needs_resolve type */ |
2003 | static const struct btf_type *btf_type_id_resolve(const struct btf *btf, |
2004 | u32 *type_id) |
2005 | { |
	*type_id = btf_resolved_type_id(btf, *type_id);
2007 | return btf_type_by_id(btf, *type_id); |
2008 | } |
2009 | |
2010 | static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id) |
2011 | { |
2012 | while (type_id < btf->start_id) |
2013 | btf = btf->base_btf; |
2014 | |
2015 | return btf->resolved_sizes[type_id - btf->start_id]; |
2016 | } |
2017 | |
2018 | const struct btf_type *btf_type_id_size(const struct btf *btf, |
2019 | u32 *type_id, u32 *ret_size) |
2020 | { |
2021 | const struct btf_type *size_type; |
2022 | u32 size_type_id = *type_id; |
2023 | u32 size = 0; |
2024 | |
2025 | size_type = btf_type_by_id(btf, size_type_id); |
	if (btf_type_nosize_or_null(size_type))
2027 | return NULL; |
2028 | |
	if (btf_type_has_size(size_type)) {
		size = size_type->size;
	} else if (btf_type_is_array(size_type)) {
		size = btf_resolved_type_size(btf, size_type_id);
	} else if (btf_type_is_ptr(size_type)) {
2034 | size = sizeof(void *); |
2035 | } else { |
2036 | if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) && |
2037 | !btf_type_is_var(size_type))) |
2038 | return NULL; |
2039 | |
		size_type_id = btf_resolved_type_id(btf, size_type_id);
		size_type = btf_type_by_id(btf, size_type_id);
		if (btf_type_nosize_or_null(size_type))
			return NULL;
		else if (btf_type_has_size(size_type))
			size = size_type->size;
		else if (btf_type_is_array(size_type))
			size = btf_resolved_type_size(btf, size_type_id);
		else if (btf_type_is_ptr(size_type))
2049 | size = sizeof(void *); |
2050 | else |
2051 | return NULL; |
2052 | } |
2053 | |
2054 | *type_id = size_type_id; |
2055 | if (ret_size) |
2056 | *ret_size = size; |
2057 | |
2058 | return size_type; |
2059 | } |
2060 | |
2061 | static int btf_df_check_member(struct btf_verifier_env *env, |
2062 | const struct btf_type *struct_type, |
2063 | const struct btf_member *member, |
2064 | const struct btf_type *member_type) |
2065 | { |
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_member");
2068 | return -EINVAL; |
2069 | } |
2070 | |
2071 | static int btf_df_check_kflag_member(struct btf_verifier_env *env, |
2072 | const struct btf_type *struct_type, |
2073 | const struct btf_member *member, |
2074 | const struct btf_type *member_type) |
2075 | { |
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_kflag_member");
2078 | return -EINVAL; |
2079 | } |
2080 | |
2081 | /* Used for ptr, array struct/union and float type members. |
2082 | * int, enum and modifier types have their specific callback functions. |
2083 | */ |
2084 | static int btf_generic_check_kflag_member(struct btf_verifier_env *env, |
2085 | const struct btf_type *struct_type, |
2086 | const struct btf_member *member, |
2087 | const struct btf_type *member_type) |
2088 | { |
2089 | if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) { |
2090 | btf_verifier_log_member(env, struct_type, member, |
					"Invalid member bitfield_size");
2092 | return -EINVAL; |
2093 | } |
2094 | |
2095 | /* bitfield size is 0, so member->offset represents bit offset only. |
2096 | * It is safe to call non kflag check_member variants. |
2097 | */ |
	return btf_type_ops(member_type)->check_member(env, struct_type,
2099 | member, |
2100 | member_type); |
2101 | } |
2102 | |
2103 | static int btf_df_resolve(struct btf_verifier_env *env, |
2104 | const struct resolve_vertex *v) |
2105 | { |
	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
2107 | return -EINVAL; |
2108 | } |
2109 | |
2110 | static void btf_df_show(const struct btf *btf, const struct btf_type *t, |
2111 | u32 type_id, void *data, u8 bits_offsets, |
2112 | struct btf_show *show) |
2113 | { |
	btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
2115 | } |
2116 | |
2117 | static int btf_int_check_member(struct btf_verifier_env *env, |
2118 | const struct btf_type *struct_type, |
2119 | const struct btf_member *member, |
2120 | const struct btf_type *member_type) |
2121 | { |
	u32 int_data = btf_type_int(member_type);
2123 | u32 struct_bits_off = member->offset; |
2124 | u32 struct_size = struct_type->size; |
2125 | u32 nr_copy_bits; |
2126 | u32 bytes_offset; |
2127 | |
2128 | if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) { |
2129 | btf_verifier_log_member(env, struct_type, member, |
					"bits_offset exceeds U32_MAX");
2131 | return -EINVAL; |
2132 | } |
2133 | |
2134 | struct_bits_off += BTF_INT_OFFSET(int_data); |
2135 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
2136 | nr_copy_bits = BTF_INT_BITS(int_data) + |
2137 | BITS_PER_BYTE_MASKED(struct_bits_off); |
2138 | |
2139 | if (nr_copy_bits > BITS_PER_U128) { |
2140 | btf_verifier_log_member(env, struct_type, member, |
					"nr_copy_bits exceeds 128");
2142 | return -EINVAL; |
2143 | } |
2144 | |
2145 | if (struct_size < bytes_offset || |
2146 | struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { |
2147 | btf_verifier_log_member(env, struct_type, member, |
					"Member exceeds struct_size");
2149 | return -EINVAL; |
2150 | } |
2151 | |
2152 | return 0; |
2153 | } |
2154 | |
2155 | static int btf_int_check_kflag_member(struct btf_verifier_env *env, |
2156 | const struct btf_type *struct_type, |
2157 | const struct btf_member *member, |
2158 | const struct btf_type *member_type) |
2159 | { |
2160 | u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset; |
	u32 int_data = btf_type_int(member_type);
2162 | u32 struct_size = struct_type->size; |
2163 | u32 nr_copy_bits; |
2164 | |
2165 | /* a regular int type is required for the kflag int member */ |
	if (!btf_type_int_is_regular(member_type)) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member base type");
2169 | return -EINVAL; |
2170 | } |
2171 | |
2172 | /* check sanity of bitfield size */ |
2173 | nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); |
2174 | struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); |
2175 | nr_int_data_bits = BTF_INT_BITS(int_data); |
2176 | if (!nr_bits) { |
2177 | /* Not a bitfield member, member offset must be at byte |
2178 | * boundary. |
2179 | */ |
2180 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
2181 | btf_verifier_log_member(env, struct_type, member, |
						"Invalid member offset");
2183 | return -EINVAL; |
2184 | } |
2185 | |
2186 | nr_bits = nr_int_data_bits; |
2187 | } else if (nr_bits > nr_int_data_bits) { |
2188 | btf_verifier_log_member(env, struct_type, member, |
					"Invalid member bitfield_size");
2190 | return -EINVAL; |
2191 | } |
2192 | |
2193 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
2194 | nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off); |
2195 | if (nr_copy_bits > BITS_PER_U128) { |
2196 | btf_verifier_log_member(env, struct_type, member, |
					"nr_copy_bits exceeds 128");
2198 | return -EINVAL; |
2199 | } |
2200 | |
2201 | if (struct_size < bytes_offset || |
2202 | struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { |
2203 | btf_verifier_log_member(env, struct_type, member, |
					"Member exceeds struct_size");
2205 | return -EINVAL; |
2206 | } |
2207 | |
2208 | return 0; |
2209 | } |
2210 | |
2211 | static s32 btf_int_check_meta(struct btf_verifier_env *env, |
2212 | const struct btf_type *t, |
2213 | u32 meta_left) |
2214 | { |
2215 | u32 int_data, nr_bits, meta_needed = sizeof(int_data); |
2216 | u16 encoding; |
2217 | |
2218 | if (meta_left < meta_needed) { |
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
2221 | meta_left, meta_needed); |
2222 | return -EINVAL; |
2223 | } |
2224 | |
2225 | if (btf_type_vlen(t)) { |
		btf_verifier_log_type(env, t, "vlen != 0");
2227 | return -EINVAL; |
2228 | } |
2229 | |
2230 | if (btf_type_kflag(t)) { |
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2232 | return -EINVAL; |
2233 | } |
2234 | |
2235 | int_data = btf_type_int(t); |
2236 | if (int_data & ~BTF_INT_MASK) { |
		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
2238 | int_data); |
2239 | return -EINVAL; |
2240 | } |
2241 | |
2242 | nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data); |
2243 | |
2244 | if (nr_bits > BITS_PER_U128) { |
		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
2246 | BITS_PER_U128); |
2247 | return -EINVAL; |
2248 | } |
2249 | |
2250 | if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) { |
		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
2252 | return -EINVAL; |
2253 | } |
2254 | |
2255 | /* |
2256 | * Only one of the encoding bits is allowed and it |
2257 | * should be sufficient for the pretty print purpose (i.e. decoding). |
2258 | * Multiple bits can be allowed later if it is found |
2259 | * to be insufficient. |
2260 | */ |
2261 | encoding = BTF_INT_ENCODING(int_data); |
2262 | if (encoding && |
2263 | encoding != BTF_INT_SIGNED && |
2264 | encoding != BTF_INT_CHAR && |
2265 | encoding != BTF_INT_BOOL) { |
		btf_verifier_log_type(env, t, "Unsupported encoding");
2267 | return -ENOTSUPP; |
2268 | } |
2269 | |
2270 | btf_verifier_log_type(env, t, NULL); |
2271 | |
2272 | return meta_needed; |
2273 | } |
2274 | |
2275 | static void btf_int_log(struct btf_verifier_env *env, |
2276 | const struct btf_type *t) |
2277 | { |
2278 | int int_data = btf_type_int(t); |
2279 | |
2280 | btf_verifier_log(env, |
			 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
2282 | t->size, BTF_INT_OFFSET(int_data), |
2283 | BTF_INT_BITS(int_data), |
2284 | btf_int_encoding_str(BTF_INT_ENCODING(int_data))); |
2285 | } |
2286 | |
2287 | static void btf_int128_print(struct btf_show *show, void *data) |
2288 | { |
2289 | /* data points to a __int128 number. |
2290 | * Suppose |
2291 | * int128_num = *(__int128 *)data; |
	 * The formulas below show what upper_num and lower_num represent:
2293 | * upper_num = int128_num >> 64; |
2294 | * lower_num = int128_num & 0xffffffffFFFFFFFFULL; |
2295 | */ |
2296 | u64 upper_num, lower_num; |
2297 | |
2298 | #ifdef __BIG_ENDIAN_BITFIELD |
2299 | upper_num = *(u64 *)data; |
2300 | lower_num = *(u64 *)(data + 8); |
2301 | #else |
2302 | upper_num = *(u64 *)(data + 8); |
2303 | lower_num = *(u64 *)data; |
2304 | #endif |
2305 | if (upper_num == 0) |
		btf_show_type_value(show, "0x%llx", lower_num);
	else
		btf_show_type_values(show, "0x%llx%016llx", upper_num,
2309 | lower_num); |
2310 | } |
2311 | |
2312 | static void btf_int128_shift(u64 *print_num, u16 left_shift_bits, |
2313 | u16 right_shift_bits) |
2314 | { |
2315 | u64 upper_num, lower_num; |
2316 | |
2317 | #ifdef __BIG_ENDIAN_BITFIELD |
2318 | upper_num = print_num[0]; |
2319 | lower_num = print_num[1]; |
2320 | #else |
2321 | upper_num = print_num[1]; |
2322 | lower_num = print_num[0]; |
2323 | #endif |
2324 | |
2325 | /* shake out un-needed bits by shift/or operations */ |
2326 | if (left_shift_bits >= 64) { |
2327 | upper_num = lower_num << (left_shift_bits - 64); |
2328 | lower_num = 0; |
2329 | } else { |
2330 | upper_num = (upper_num << left_shift_bits) | |
2331 | (lower_num >> (64 - left_shift_bits)); |
2332 | lower_num = lower_num << left_shift_bits; |
2333 | } |
2334 | |
2335 | if (right_shift_bits >= 64) { |
2336 | lower_num = upper_num >> (right_shift_bits - 64); |
2337 | upper_num = 0; |
2338 | } else { |
2339 | lower_num = (lower_num >> right_shift_bits) | |
2340 | (upper_num << (64 - right_shift_bits)); |
2341 | upper_num = upper_num >> right_shift_bits; |
2342 | } |
2343 | |
2344 | #ifdef __BIG_ENDIAN_BITFIELD |
2345 | print_num[0] = upper_num; |
2346 | print_num[1] = lower_num; |
2347 | #else |
2348 | print_num[0] = lower_num; |
2349 | print_num[1] = upper_num; |
2350 | #endif |
2351 | } |
2352 | |
2353 | static void btf_bitfield_show(void *data, u8 bits_offset, |
2354 | u8 nr_bits, struct btf_show *show) |
2355 | { |
2356 | u16 left_shift_bits, right_shift_bits; |
2357 | u8 nr_copy_bytes; |
2358 | u8 nr_copy_bits; |
2359 | u64 print_num[2] = {}; |
2360 | |
2361 | nr_copy_bits = nr_bits + bits_offset; |
2362 | nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); |
2363 | |
2364 | memcpy(print_num, data, nr_copy_bytes); |
2365 | |
2366 | #ifdef __BIG_ENDIAN_BITFIELD |
2367 | left_shift_bits = bits_offset; |
2368 | #else |
2369 | left_shift_bits = BITS_PER_U128 - nr_copy_bits; |
2370 | #endif |
2371 | right_shift_bits = BITS_PER_U128 - nr_bits; |
2372 | |
2373 | btf_int128_shift(print_num, left_shift_bits, right_shift_bits); |
	btf_int128_print(show, print_num);
2375 | } |
2376 | |
2377 | |
2378 | static void btf_int_bits_show(const struct btf *btf, |
2379 | const struct btf_type *t, |
2380 | void *data, u8 bits_offset, |
2381 | struct btf_show *show) |
2382 | { |
2383 | u32 int_data = btf_type_int(t); |
2384 | u8 nr_bits = BTF_INT_BITS(int_data); |
2385 | u8 total_bits_offset; |
2386 | |
2387 | /* |
2388 | * bits_offset is at most 7. |
2389 | * BTF_INT_OFFSET() cannot exceed 128 bits. |
2390 | */ |
2391 | total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); |
2392 | data += BITS_ROUNDDOWN_BYTES(total_bits_offset); |
2393 | bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset); |
2394 | btf_bitfield_show(data, bits_offset, nr_bits, show); |
2395 | } |
2396 | |
2397 | static void btf_int_show(const struct btf *btf, const struct btf_type *t, |
2398 | u32 type_id, void *data, u8 bits_offset, |
2399 | struct btf_show *show) |
2400 | { |
2401 | u32 int_data = btf_type_int(t); |
2402 | u8 encoding = BTF_INT_ENCODING(int_data); |
2403 | bool sign = encoding & BTF_INT_SIGNED; |
2404 | u8 nr_bits = BTF_INT_BITS(int_data); |
2405 | void *safe_data; |
2406 | |
2407 | safe_data = btf_show_start_type(show, t, type_id, data); |
2408 | if (!safe_data) |
2409 | return; |
2410 | |
2411 | if (bits_offset || BTF_INT_OFFSET(int_data) || |
2412 | BITS_PER_BYTE_MASKED(nr_bits)) { |
		btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2414 | goto out; |
2415 | } |
2416 | |
2417 | switch (nr_bits) { |
2418 | case 128: |
		btf_int128_print(show, safe_data);
2420 | break; |
2421 | case 64: |
2422 | if (sign) |
			btf_show_type_value(show, "%lld", *(s64 *)safe_data);
		else
			btf_show_type_value(show, "%llu", *(u64 *)safe_data);
		break;
	case 32:
		if (sign)
			btf_show_type_value(show, "%d", *(s32 *)safe_data);
		else
			btf_show_type_value(show, "%u", *(u32 *)safe_data);
		break;
	case 16:
		if (sign)
			btf_show_type_value(show, "%d", *(s16 *)safe_data);
		else
			btf_show_type_value(show, "%u", *(u16 *)safe_data);
2438 | break; |
2439 | case 8: |
2440 | if (show->state.array_encoding == BTF_INT_CHAR) { |
2441 | /* check for null terminator */ |
2442 | if (show->state.array_terminated) |
2443 | break; |
2444 | if (*(char *)data == '\0') { |
2445 | show->state.array_terminated = 1; |
2446 | break; |
2447 | } |
2448 | if (isprint(*(char *)data)) { |
				btf_show_type_value(show, "'%c'",
2450 | *(char *)safe_data); |
2451 | break; |
2452 | } |
2453 | } |
2454 | if (sign) |
			btf_show_type_value(show, "%d", *(s8 *)safe_data);
		else
			btf_show_type_value(show, "%u", *(u8 *)safe_data);
2458 | break; |
2459 | default: |
		btf_int_bits_show(btf, t, safe_data, bits_offset, show);
2461 | break; |
2462 | } |
2463 | out: |
2464 | btf_show_end_type(show); |
2465 | } |
2466 | |
2467 | static const struct btf_kind_operations int_ops = { |
2468 | .check_meta = btf_int_check_meta, |
2469 | .resolve = btf_df_resolve, |
2470 | .check_member = btf_int_check_member, |
2471 | .check_kflag_member = btf_int_check_kflag_member, |
2472 | .log_details = btf_int_log, |
2473 | .show = btf_int_show, |
2474 | }; |
2475 | |
2476 | static int btf_modifier_check_member(struct btf_verifier_env *env, |
2477 | const struct btf_type *struct_type, |
2478 | const struct btf_member *member, |
2479 | const struct btf_type *member_type) |
2480 | { |
2481 | const struct btf_type *resolved_type; |
2482 | u32 resolved_type_id = member->type; |
2483 | struct btf_member resolved_member; |
2484 | struct btf *btf = env->btf; |
2485 | |
	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
	if (!resolved_type) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member");
2490 | return -EINVAL; |
2491 | } |
2492 | |
2493 | resolved_member = *member; |
2494 | resolved_member.type = resolved_type_id; |
2495 | |
	return btf_type_ops(resolved_type)->check_member(env, struct_type,
2497 | &resolved_member, |
2498 | resolved_type); |
2499 | } |
2500 | |
2501 | static int btf_modifier_check_kflag_member(struct btf_verifier_env *env, |
2502 | const struct btf_type *struct_type, |
2503 | const struct btf_member *member, |
2504 | const struct btf_type *member_type) |
2505 | { |
2506 | const struct btf_type *resolved_type; |
2507 | u32 resolved_type_id = member->type; |
2508 | struct btf_member resolved_member; |
2509 | struct btf *btf = env->btf; |
2510 | |
	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
	if (!resolved_type) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member");
2515 | return -EINVAL; |
2516 | } |
2517 | |
2518 | resolved_member = *member; |
2519 | resolved_member.type = resolved_type_id; |
2520 | |
	return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
2522 | &resolved_member, |
2523 | resolved_type); |
2524 | } |
2525 | |
2526 | static int btf_ptr_check_member(struct btf_verifier_env *env, |
2527 | const struct btf_type *struct_type, |
2528 | const struct btf_member *member, |
2529 | const struct btf_type *member_type) |
2530 | { |
2531 | u32 struct_size, struct_bits_off, bytes_offset; |
2532 | |
2533 | struct_size = struct_type->size; |
2534 | struct_bits_off = member->offset; |
2535 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
2536 | |
2537 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
2538 | btf_verifier_log_member(env, struct_type, member, |
					"Member is not byte aligned");
2540 | return -EINVAL; |
2541 | } |
2542 | |
2543 | if (struct_size - bytes_offset < sizeof(void *)) { |
2544 | btf_verifier_log_member(env, struct_type, member, |
					"Member exceeds struct_size");
2546 | return -EINVAL; |
2547 | } |
2548 | |
2549 | return 0; |
2550 | } |
2551 | |
2552 | static int btf_ref_type_check_meta(struct btf_verifier_env *env, |
2553 | const struct btf_type *t, |
2554 | u32 meta_left) |
2555 | { |
2556 | const char *value; |
2557 | |
2558 | if (btf_type_vlen(t)) { |
		btf_verifier_log_type(env, t, "vlen != 0");
2560 | return -EINVAL; |
2561 | } |
2562 | |
2563 | if (btf_type_kflag(t)) { |
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2565 | return -EINVAL; |
2566 | } |
2567 | |
2568 | if (!BTF_TYPE_ID_VALID(t->type)) { |
		btf_verifier_log_type(env, t, "Invalid type_id");
2570 | return -EINVAL; |
2571 | } |
2572 | |
2573 | /* typedef/type_tag type must have a valid name, and other ref types, |
2574 | * volatile, const, restrict, should have a null name. |
2575 | */ |
2576 | if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) { |
2577 | if (!t->name_off || |
		    !btf_name_valid_identifier(env->btf, t->name_off)) {
			btf_verifier_log_type(env, t, "Invalid name");
2580 | return -EINVAL; |
2581 | } |
2582 | } else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) { |
		value = btf_name_by_offset(env->btf, t->name_off);
		if (!value || !value[0]) {
			btf_verifier_log_type(env, t, "Invalid name");
2586 | return -EINVAL; |
2587 | } |
2588 | } else { |
2589 | if (t->name_off) { |
			btf_verifier_log_type(env, t, "Invalid name");
2591 | return -EINVAL; |
2592 | } |
2593 | } |
2594 | |
2595 | btf_verifier_log_type(env, t, NULL); |
2596 | |
2597 | return 0; |
2598 | } |
2599 | |
2600 | static int btf_modifier_resolve(struct btf_verifier_env *env, |
2601 | const struct resolve_vertex *v) |
2602 | { |
2603 | const struct btf_type *t = v->t; |
2604 | const struct btf_type *next_type; |
2605 | u32 next_type_id = t->type; |
2606 | struct btf *btf = env->btf; |
2607 | |
2608 | next_type = btf_type_by_id(btf, next_type_id); |
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
2611 | return -EINVAL; |
2612 | } |
2613 | |
2614 | if (!env_type_is_resolve_sink(env, next_type) && |
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);
2617 | |
2618 | /* Figure out the resolved next_type_id with size. |
2619 | * They will be stored in the current modifier's |
2620 | * resolved_ids and resolved_sizes such that it can |
2621 | * save us a few type-following when we use it later (e.g. in |
2622 | * pretty print). |
2623 | */ |
	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		if (env_type_is_resolved(env, next_type_id))
			next_type = btf_type_id_resolve(btf, &next_type_id);

		/* "typedef void new_void", "const void"...etc */
		if (!btf_type_is_void(next_type) &&
		    !btf_type_is_fwd(next_type) &&
		    !btf_type_is_func_proto(next_type)) {
			btf_verifier_log_type(env, v->t, "Invalid type_id");
2633 | return -EINVAL; |
2634 | } |
2635 | } |
2636 | |
	env_stack_pop_resolved(env, next_type_id, 0);
2638 | |
2639 | return 0; |
2640 | } |
2641 | |
2642 | static int btf_var_resolve(struct btf_verifier_env *env, |
2643 | const struct resolve_vertex *v) |
2644 | { |
2645 | const struct btf_type *next_type; |
2646 | const struct btf_type *t = v->t; |
2647 | u32 next_type_id = t->type; |
2648 | struct btf *btf = env->btf; |
2649 | |
2650 | next_type = btf_type_by_id(btf, next_type_id); |
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
2653 | return -EINVAL; |
2654 | } |
2655 | |
2656 | if (!env_type_is_resolve_sink(env, next_type) && |
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);
2659 | |
	if (btf_type_is_modifier(next_type)) {
2661 | const struct btf_type *resolved_type; |
2662 | u32 resolved_type_id; |
2663 | |
2664 | resolved_type_id = next_type_id; |
		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);

		if (btf_type_is_ptr(resolved_type) &&
		    !env_type_is_resolve_sink(env, resolved_type) &&
		    !env_type_is_resolved(env, resolved_type_id))
			return env_stack_push(env, resolved_type,
					      resolved_type_id);
2672 | } |
2673 | |
	/* We must resolve to something concrete at this point; no
	 * forward types or anything similar that would resolve to a
	 * size of zero is allowed.
	 */
	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
2680 | return -EINVAL; |
2681 | } |
2682 | |
	env_stack_pop_resolved(env, next_type_id, 0);
2684 | |
2685 | return 0; |
2686 | } |
2687 | |
2688 | static int btf_ptr_resolve(struct btf_verifier_env *env, |
2689 | const struct resolve_vertex *v) |
2690 | { |
2691 | const struct btf_type *next_type; |
2692 | const struct btf_type *t = v->t; |
2693 | u32 next_type_id = t->type; |
2694 | struct btf *btf = env->btf; |
2695 | |
2696 | next_type = btf_type_by_id(btf, next_type_id); |
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
2699 | return -EINVAL; |
2700 | } |
2701 | |
2702 | if (!env_type_is_resolve_sink(env, next_type) && |
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);
2705 | |
2706 | /* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY, |
2707 | * the modifier may have stopped resolving when it was resolved |
2708 | * to a ptr (last-resolved-ptr). |
2709 | * |
2710 | * We now need to continue from the last-resolved-ptr to |
	 * ensure that the last-resolved-ptr does not refer back to
2712 | * the current ptr (t). |
2713 | */ |
	if (btf_type_is_modifier(next_type)) {
2715 | const struct btf_type *resolved_type; |
2716 | u32 resolved_type_id; |
2717 | |
2718 | resolved_type_id = next_type_id; |
		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);

		if (btf_type_is_ptr(resolved_type) &&
		    !env_type_is_resolve_sink(env, resolved_type) &&
		    !env_type_is_resolved(env, resolved_type_id))
			return env_stack_push(env, resolved_type,
					      resolved_type_id);
2726 | } |
2727 | |
	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		if (env_type_is_resolved(env, next_type_id))
			next_type = btf_type_id_resolve(btf, &next_type_id);

		if (!btf_type_is_void(next_type) &&
		    !btf_type_is_fwd(next_type) &&
		    !btf_type_is_func_proto(next_type)) {
			btf_verifier_log_type(env, v->t, "Invalid type_id");
2736 | return -EINVAL; |
2737 | } |
2738 | } |
2739 | |
	env_stack_pop_resolved(env, next_type_id, 0);
2741 | |
2742 | return 0; |
2743 | } |
2744 | |
2745 | static void btf_modifier_show(const struct btf *btf, |
2746 | const struct btf_type *t, |
2747 | u32 type_id, void *data, |
2748 | u8 bits_offset, struct btf_show *show) |
2749 | { |
2750 | if (btf->resolved_ids) |
		t = btf_type_id_resolve(btf, &type_id);
	else
		t = btf_type_skip_modifiers(btf, type_id, NULL);
2754 | |
2755 | btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show); |
2756 | } |
2757 | |
2758 | static void btf_var_show(const struct btf *btf, const struct btf_type *t, |
2759 | u32 type_id, void *data, u8 bits_offset, |
2760 | struct btf_show *show) |
2761 | { |
	t = btf_type_id_resolve(btf, &type_id);
2763 | |
2764 | btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show); |
2765 | } |
2766 | |
2767 | static void btf_ptr_show(const struct btf *btf, const struct btf_type *t, |
2768 | u32 type_id, void *data, u8 bits_offset, |
2769 | struct btf_show *show) |
2770 | { |
2771 | void *safe_data; |
2772 | |
2773 | safe_data = btf_show_start_type(show, t, type_id, data); |
2774 | if (!safe_data) |
2775 | return; |
2776 | |
2777 | /* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */ |
2778 | if (show->flags & BTF_SHOW_PTR_RAW) |
		btf_show_type_value(show, "0x%px", *(void **)safe_data);
	else
		btf_show_type_value(show, "0x%p", *(void **)safe_data);
2782 | btf_show_end_type(show); |
2783 | } |
2784 | |
2785 | static void btf_ref_type_log(struct btf_verifier_env *env, |
2786 | const struct btf_type *t) |
2787 | { |
	btf_verifier_log(env, "type_id=%u", t->type);
2789 | } |
2790 | |
2791 | static struct btf_kind_operations modifier_ops = { |
2792 | .check_meta = btf_ref_type_check_meta, |
2793 | .resolve = btf_modifier_resolve, |
2794 | .check_member = btf_modifier_check_member, |
2795 | .check_kflag_member = btf_modifier_check_kflag_member, |
2796 | .log_details = btf_ref_type_log, |
2797 | .show = btf_modifier_show, |
2798 | }; |
2799 | |
2800 | static struct btf_kind_operations ptr_ops = { |
2801 | .check_meta = btf_ref_type_check_meta, |
2802 | .resolve = btf_ptr_resolve, |
2803 | .check_member = btf_ptr_check_member, |
2804 | .check_kflag_member = btf_generic_check_kflag_member, |
2805 | .log_details = btf_ref_type_log, |
2806 | .show = btf_ptr_show, |
2807 | }; |
2808 | |
2809 | static s32 btf_fwd_check_meta(struct btf_verifier_env *env, |
2810 | const struct btf_type *t, |
2811 | u32 meta_left) |
2812 | { |
2813 | if (btf_type_vlen(t)) { |
		btf_verifier_log_type(env, t, "vlen != 0");
2815 | return -EINVAL; |
2816 | } |
2817 | |
2818 | if (t->type) { |
		btf_verifier_log_type(env, t, "type != 0");
2820 | return -EINVAL; |
2821 | } |
2822 | |
2823 | /* fwd type must have a valid name */ |
2824 | if (!t->name_off || |
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
2827 | return -EINVAL; |
2828 | } |
2829 | |
2830 | btf_verifier_log_type(env, t, NULL); |
2831 | |
2832 | return 0; |
2833 | } |
2834 | |
2835 | static void btf_fwd_type_log(struct btf_verifier_env *env, |
2836 | const struct btf_type *t) |
2837 | { |
	btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
2839 | } |
2840 | |
2841 | static struct btf_kind_operations fwd_ops = { |
2842 | .check_meta = btf_fwd_check_meta, |
2843 | .resolve = btf_df_resolve, |
2844 | .check_member = btf_df_check_member, |
2845 | .check_kflag_member = btf_df_check_kflag_member, |
2846 | .log_details = btf_fwd_type_log, |
2847 | .show = btf_df_show, |
2848 | }; |
2849 | |
2850 | static int btf_array_check_member(struct btf_verifier_env *env, |
2851 | const struct btf_type *struct_type, |
2852 | const struct btf_member *member, |
2853 | const struct btf_type *member_type) |
2854 | { |
2855 | u32 struct_bits_off = member->offset; |
2856 | u32 struct_size, bytes_offset; |
2857 | u32 array_type_id, array_size; |
2858 | struct btf *btf = env->btf; |
2859 | |
2860 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
2861 | btf_verifier_log_member(env, struct_type, member, |
					"Member is not byte aligned");
2863 | return -EINVAL; |
2864 | } |
2865 | |
2866 | array_type_id = member->type; |
	btf_type_id_size(btf, &array_type_id, &array_size);
2868 | struct_size = struct_type->size; |
2869 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
2870 | if (struct_size - bytes_offset < array_size) { |
2871 | btf_verifier_log_member(env, struct_type, member, |
					"Member exceeds struct_size");
2873 | return -EINVAL; |
2874 | } |
2875 | |
2876 | return 0; |
2877 | } |
2878 | |
2879 | static s32 btf_array_check_meta(struct btf_verifier_env *env, |
2880 | const struct btf_type *t, |
2881 | u32 meta_left) |
2882 | { |
2883 | const struct btf_array *array = btf_type_array(t); |
2884 | u32 meta_needed = sizeof(*array); |
2885 | |
2886 | if (meta_left < meta_needed) { |
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
2889 | meta_left, meta_needed); |
2890 | return -EINVAL; |
2891 | } |
2892 | |
2893 | /* array type should not have a name */ |
2894 | if (t->name_off) { |
		btf_verifier_log_type(env, t, "Invalid name");
2896 | return -EINVAL; |
2897 | } |
2898 | |
2899 | if (btf_type_vlen(t)) { |
		btf_verifier_log_type(env, t, "vlen != 0");
2901 | return -EINVAL; |
2902 | } |
2903 | |
2904 | if (btf_type_kflag(t)) { |
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2906 | return -EINVAL; |
2907 | } |
2908 | |
2909 | if (t->size) { |
		btf_verifier_log_type(env, t, "size != 0");
2911 | return -EINVAL; |
2912 | } |
2913 | |
	/* Array elem type and index type cannot be of type void,
	 * so !array->type and !array->index_type are not allowed.
2916 | */ |
2917 | if (!array->type || !BTF_TYPE_ID_VALID(array->type)) { |
		btf_verifier_log_type(env, t, "Invalid elem");
2919 | return -EINVAL; |
2920 | } |
2921 | |
2922 | if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) { |
		btf_verifier_log_type(env, t, "Invalid index");
2924 | return -EINVAL; |
2925 | } |
2926 | |
2927 | btf_verifier_log_type(env, t, NULL); |
2928 | |
2929 | return meta_needed; |
2930 | } |
2931 | |
2932 | static int btf_array_resolve(struct btf_verifier_env *env, |
2933 | const struct resolve_vertex *v) |
2934 | { |
	const struct btf_array *array = btf_type_array(v->t);
2936 | const struct btf_type *elem_type, *index_type; |
2937 | u32 elem_type_id, index_type_id; |
2938 | struct btf *btf = env->btf; |
2939 | u32 elem_size; |
2940 | |
2941 | /* Check array->index_type */ |
2942 | index_type_id = array->index_type; |
2943 | index_type = btf_type_by_id(btf, index_type_id); |
	if (btf_type_nosize_or_null(index_type) ||
	    btf_type_is_resolve_source_only(index_type)) {
		btf_verifier_log_type(env, v->t, "Invalid index");
2947 | return -EINVAL; |
2948 | } |
2949 | |
	if (!env_type_is_resolve_sink(env, index_type) &&
	    !env_type_is_resolved(env, index_type_id))
		return env_stack_push(env, index_type, index_type_id);
2953 | |
	index_type = btf_type_id_size(btf, &index_type_id, NULL);
	if (!index_type || !btf_type_is_int(index_type) ||
	    !btf_type_int_is_regular(index_type)) {
		btf_verifier_log_type(env, v->t, "Invalid index");
2958 | return -EINVAL; |
2959 | } |
2960 | |
2961 | /* Check array->type */ |
2962 | elem_type_id = array->type; |
2963 | elem_type = btf_type_by_id(btf, elem_type_id); |
	if (btf_type_nosize_or_null(elem_type) ||
	    btf_type_is_resolve_source_only(elem_type)) {
		btf_verifier_log_type(env, v->t,
				      "Invalid elem");
2968 | return -EINVAL; |
2969 | } |
2970 | |
	if (!env_type_is_resolve_sink(env, elem_type) &&
	    !env_type_is_resolved(env, elem_type_id))
		return env_stack_push(env, elem_type, elem_type_id);
2974 | |
	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
	if (!elem_type) {
		btf_verifier_log_type(env, v->t, "Invalid elem");
2978 | return -EINVAL; |
2979 | } |
2980 | |
	if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
		btf_verifier_log_type(env, v->t, "Invalid array of int");
2983 | return -EINVAL; |
2984 | } |
2985 | |
2986 | if (array->nelems && elem_size > U32_MAX / array->nelems) { |
		btf_verifier_log_type(env, v->t,
				      "Array size overflows U32_MAX");
2989 | return -EINVAL; |
2990 | } |
2991 | |
	env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
2993 | |
2994 | return 0; |
2995 | } |
2996 | |
2997 | static void btf_array_log(struct btf_verifier_env *env, |
2998 | const struct btf_type *t) |
2999 | { |
3000 | const struct btf_array *array = btf_type_array(t); |
3001 | |
	btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
3003 | array->type, array->index_type, array->nelems); |
3004 | } |
3005 | |
3006 | static void __btf_array_show(const struct btf *btf, const struct btf_type *t, |
3007 | u32 type_id, void *data, u8 bits_offset, |
3008 | struct btf_show *show) |
3009 | { |
3010 | const struct btf_array *array = btf_type_array(t); |
3011 | const struct btf_kind_operations *elem_ops; |
3012 | const struct btf_type *elem_type; |
3013 | u32 i, elem_size = 0, elem_type_id; |
3014 | u16 encoding = 0; |
3015 | |
3016 | elem_type_id = array->type; |
	elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL);
	if (elem_type && btf_type_has_size(elem_type))
3019 | elem_size = elem_type->size; |
3020 | |
	if (elem_type && btf_type_is_int(elem_type)) {
		u32 int_type = btf_type_int(elem_type);
3023 | |
3024 | encoding = BTF_INT_ENCODING(int_type); |
3025 | |
3026 | /* |
3027 | * BTF_INT_CHAR encoding never seems to be set for |
3028 | * char arrays, so if size is 1 and element is |
3029 | * printable as a char, we'll do that. |
3030 | */ |
3031 | if (elem_size == 1) |
3032 | encoding = BTF_INT_CHAR; |
3033 | } |
3034 | |
	if (!btf_show_start_array_type(show, t, type_id, encoding, data))
3036 | return; |
3037 | |
3038 | if (!elem_type) |
3039 | goto out; |
	elem_ops = btf_type_ops(elem_type);
3041 | |
3042 | for (i = 0; i < array->nelems; i++) { |
3043 | |
3044 | btf_show_start_array_member(show); |
3045 | |
3046 | elem_ops->show(btf, elem_type, elem_type_id, data, |
3047 | bits_offset, show); |
3048 | data += elem_size; |
3049 | |
3050 | btf_show_end_array_member(show); |
3051 | |
3052 | if (show->state.array_terminated) |
3053 | break; |
3054 | } |
3055 | out: |
3056 | btf_show_end_array_type(show); |
3057 | } |
3058 | |
3059 | static void btf_array_show(const struct btf *btf, const struct btf_type *t, |
3060 | u32 type_id, void *data, u8 bits_offset, |
3061 | struct btf_show *show) |
3062 | { |
3063 | const struct btf_member *m = show->state.member; |
3064 | |
3065 | /* |
3066 | * First check if any members would be shown (are non-zero). |
3067 | * See comments above "struct btf_show" definition for more |
3068 | * details on how this works at a high-level. |
3069 | */ |
3070 | if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) { |
3071 | if (!show->state.depth_check) { |
3072 | show->state.depth_check = show->state.depth + 1; |
3073 | show->state.depth_to_show = 0; |
3074 | } |
3075 | __btf_array_show(btf, t, type_id, data, bits_offset, show); |
3076 | show->state.member = m; |
3077 | |
3078 | if (show->state.depth_check != show->state.depth + 1) |
3079 | return; |
3080 | show->state.depth_check = 0; |
3081 | |
3082 | if (show->state.depth_to_show <= show->state.depth) |
3083 | return; |
3084 | /* |
3085 | * Reaching here indicates we have recursed and found |
3086 | * non-zero array member(s). |
3087 | */ |
3088 | } |
3089 | __btf_array_show(btf, t, type_id, data, bits_offset, show); |
3090 | } |
3091 | |
3092 | static struct btf_kind_operations array_ops = { |
3093 | .check_meta = btf_array_check_meta, |
3094 | .resolve = btf_array_resolve, |
3095 | .check_member = btf_array_check_member, |
3096 | .check_kflag_member = btf_generic_check_kflag_member, |
3097 | .log_details = btf_array_log, |
3098 | .show = btf_array_show, |
3099 | }; |
3100 | |
3101 | static int btf_struct_check_member(struct btf_verifier_env *env, |
3102 | const struct btf_type *struct_type, |
3103 | const struct btf_member *member, |
3104 | const struct btf_type *member_type) |
3105 | { |
3106 | u32 struct_bits_off = member->offset; |
3107 | u32 struct_size, bytes_offset; |
3108 | |
3109 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
3110 | btf_verifier_log_member(env, struct_type, member, |
					"Member is not byte aligned");
3112 | return -EINVAL; |
3113 | } |
3114 | |
3115 | struct_size = struct_type->size; |
3116 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
3117 | if (struct_size - bytes_offset < member_type->size) { |
3118 | btf_verifier_log_member(env, struct_type, member, |
					"Member exceeds struct_size");
3120 | return -EINVAL; |
3121 | } |
3122 | |
3123 | return 0; |
3124 | } |
3125 | |
3126 | static s32 btf_struct_check_meta(struct btf_verifier_env *env, |
3127 | const struct btf_type *t, |
3128 | u32 meta_left) |
3129 | { |
3130 | bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION; |
3131 | const struct btf_member *member; |
3132 | u32 meta_needed, last_offset; |
3133 | struct btf *btf = env->btf; |
3134 | u32 struct_size = t->size; |
3135 | u32 offset; |
3136 | u16 i; |
3137 | |
3138 | meta_needed = btf_type_vlen(t) * sizeof(*member); |
3139 | if (meta_left < meta_needed) { |
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
3142 | meta_left, meta_needed); |
3143 | return -EINVAL; |
3144 | } |
3145 | |
	/* A struct type has either no name or a valid one */
	if (t->name_off &&
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
3150 | return -EINVAL; |
3151 | } |
3152 | |
3153 | btf_verifier_log_type(env, t, NULL); |
3154 | |
3155 | last_offset = 0; |
3156 | for_each_member(i, t, member) { |
		if (!btf_name_offset_valid(btf, member->name_off)) {
			btf_verifier_log_member(env, t, member,
						"Invalid member name_offset:%u",
						member->name_off);
3161 | return -EINVAL; |
3162 | } |
3163 | |
		/* A struct member has either no name or a valid one */
		if (member->name_off &&
		    !btf_name_valid_identifier(btf, member->name_off)) {
			btf_verifier_log_member(env, t, member, "Invalid name");
3168 | return -EINVAL; |
3169 | } |
		/* A member cannot be of type void */
		if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
			btf_verifier_log_member(env, t, member,
						"Invalid type_id");
3174 | return -EINVAL; |
3175 | } |
3176 | |
		offset = __btf_member_bit_offset(t, member);
3178 | if (is_union && offset) { |
			btf_verifier_log_member(env, t, member,
						"Invalid member bits_offset");
3181 | return -EINVAL; |
3182 | } |
3183 | |
3184 | /* |
3185 | * ">" instead of ">=" because the last member could be |
3186 | * "char a[0];" |
3187 | */ |
3188 | if (last_offset > offset) { |
			btf_verifier_log_member(env, t, member,
						"Invalid member bits_offset");
3191 | return -EINVAL; |
3192 | } |
3193 | |
3194 | if (BITS_ROUNDUP_BYTES(offset) > struct_size) { |
			btf_verifier_log_member(env, t, member,
						"Member bits_offset exceeds its struct size");
3197 | return -EINVAL; |
3198 | } |
3199 | |
		btf_verifier_log_member(env, t, member, NULL);
3201 | last_offset = offset; |
3202 | } |
3203 | |
3204 | return meta_needed; |
3205 | } |
3206 | |
3207 | static int btf_struct_resolve(struct btf_verifier_env *env, |
3208 | const struct resolve_vertex *v) |
3209 | { |
3210 | const struct btf_member *member; |
3211 | int err; |
3212 | u16 i; |
3213 | |
	/* Before continuing to resolve the next_member,
3215 | * ensure the last member is indeed resolved to a |
3216 | * type with size info. |
3217 | */ |
3218 | if (v->next_member) { |
3219 | const struct btf_type *last_member_type; |
3220 | const struct btf_member *last_member; |
3221 | u32 last_member_type_id; |
3222 | |
		last_member = btf_type_member(v->t) + v->next_member - 1;
3224 | last_member_type_id = last_member->type; |
3225 | if (WARN_ON_ONCE(!env_type_is_resolved(env, |
3226 | last_member_type_id))) |
3227 | return -EINVAL; |
3228 | |
3229 | last_member_type = btf_type_by_id(env->btf, |
3230 | last_member_type_id); |
		if (btf_type_kflag(v->t))
			err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
3233 | last_member, |
3234 | last_member_type); |
3235 | else |
			err = btf_type_ops(last_member_type)->check_member(env, v->t,
3237 | last_member, |
3238 | last_member_type); |
3239 | if (err) |
3240 | return err; |
3241 | } |
3242 | |
3243 | for_each_member_from(i, v->next_member, v->t, member) { |
3244 | u32 member_type_id = member->type; |
3245 | const struct btf_type *member_type = btf_type_by_id(env->btf, |
3246 | member_type_id); |
3247 | |
		if (btf_type_nosize_or_null(member_type) ||
		    btf_type_is_resolve_source_only(member_type)) {
			btf_verifier_log_member(env, v->t, member,
						"Invalid member");
3252 | return -EINVAL; |
3253 | } |
3254 | |
		if (!env_type_is_resolve_sink(env, member_type) &&
		    !env_type_is_resolved(env, member_type_id)) {
			env_stack_set_next_member(env, i + 1);
			return env_stack_push(env, member_type, member_type_id);
3259 | } |
3260 | |
		if (btf_type_kflag(v->t))
			err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
3263 | member, |
3264 | member_type); |
3265 | else |
			err = btf_type_ops(member_type)->check_member(env, v->t,
3267 | member, |
3268 | member_type); |
3269 | if (err) |
3270 | return err; |
3271 | } |
3272 | |
	env_stack_pop_resolved(env, 0, 0);
3274 | |
3275 | return 0; |
3276 | } |
3277 | |
3278 | static void btf_struct_log(struct btf_verifier_env *env, |
3279 | const struct btf_type *t) |
3280 | { |
	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
3282 | } |
3283 | |
3284 | enum { |
3285 | BTF_FIELD_IGNORE = 0, |
3286 | BTF_FIELD_FOUND = 1, |
3287 | }; |
3288 | |
3289 | struct btf_field_info { |
3290 | enum btf_field_type type; |
3291 | u32 off; |
3292 | union { |
3293 | struct { |
3294 | u32 type_id; |
3295 | } kptr; |
3296 | struct { |
3297 | const char *node_name; |
3298 | u32 value_btf_id; |
3299 | } graph_root; |
3300 | }; |
3301 | }; |
3302 | |
3303 | static int btf_find_struct(const struct btf *btf, const struct btf_type *t, |
3304 | u32 off, int sz, enum btf_field_type field_type, |
3305 | struct btf_field_info *info) |
3306 | { |
3307 | if (!__btf_type_is_struct(t)) |
3308 | return BTF_FIELD_IGNORE; |
3309 | if (t->size != sz) |
3310 | return BTF_FIELD_IGNORE; |
3311 | info->type = field_type; |
3312 | info->off = off; |
3313 | return BTF_FIELD_FOUND; |
3314 | } |
3315 | |
3316 | static int btf_find_kptr(const struct btf *btf, const struct btf_type *t, |
3317 | u32 off, int sz, struct btf_field_info *info) |
3318 | { |
3319 | enum btf_field_type type; |
3320 | u32 res_id; |
3321 | |
3322 | /* Permit modifiers on the pointer itself */ |
3323 | if (btf_type_is_volatile(t)) |
3324 | t = btf_type_by_id(btf, t->type); |
3325 | /* For PTR, sz is always == 8 */ |
3326 | if (!btf_type_is_ptr(t)) |
3327 | return BTF_FIELD_IGNORE; |
3328 | t = btf_type_by_id(btf, t->type); |
3329 | |
3330 | if (!btf_type_is_type_tag(t)) |
3331 | return BTF_FIELD_IGNORE; |
3332 | /* Reject extra tags */ |
3333 | if (btf_type_is_type_tag(btf_type_by_id(btf, t->type))) |
3334 | return -EINVAL; |
3335 | if (!strcmp("kptr_untrusted" , __btf_name_by_offset(btf, offset: t->name_off))) |
3336 | type = BPF_KPTR_UNREF; |
3337 | else if (!strcmp("kptr" , __btf_name_by_offset(btf, offset: t->name_off))) |
3338 | type = BPF_KPTR_REF; |
3339 | else if (!strcmp("percpu_kptr" , __btf_name_by_offset(btf, offset: t->name_off))) |
3340 | type = BPF_KPTR_PERCPU; |
3341 | else |
3342 | return -EINVAL; |
3343 | |
3344 | /* Get the base type */ |
3345 | t = btf_type_skip_modifiers(btf, t->type, &res_id); |
3346 | /* Only pointer to struct is allowed */ |
3347 | if (!__btf_type_is_struct(t)) |
3348 | return -EINVAL; |
3349 | |
3350 | info->type = type; |
3351 | info->off = off; |
3352 | info->kptr.type_id = res_id; |
3353 | return BTF_FIELD_FOUND; |
3354 | } |
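
/* Illustrative sketch (not compiled here; the __kptr macro name is the one
 * commonly provided by libbpf's bpf_helpers.h and is assumed to expand to
 * __attribute__((btf_type_tag("kptr")))): btf_find_kptr() recognizes a map
 * value member such as
 *
 *	struct map_value {
 *		struct task_struct __kptr *task;
 *	};
 *
 * The member's BTF chain is PTR -> TYPE_TAG("kptr") -> STRUCT task_struct,
 * so type becomes BPF_KPTR_REF and info->kptr.type_id the struct's BTF id.
 */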
3355 | |
3356 | int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt, |
3357 | int comp_idx, const char *tag_key, int last_id) |
3358 | { |
3359 | int len = strlen(tag_key); |
3360 | int i, n; |
3361 | |
3362 | for (i = last_id + 1, n = btf_nr_types(btf); i < n; i++) { |
3363 | const struct btf_type *t = btf_type_by_id(btf, i); |
3364 | |
3365 | if (!btf_type_is_decl_tag(t)) |
3366 | continue; |
3367 | if (pt != btf_type_by_id(btf, t->type)) |
3368 | continue; |
3369 | if (btf_type_decl_tag(t)->component_idx != comp_idx) |
3370 | continue; |
3371 | if (strncmp(__btf_name_by_offset(btf, t->name_off), tag_key, len)) |
3372 | continue; |
3373 | return i; |
3374 | } |
3375 | return -ENOENT; |
3376 | } |
3377 | |
3378 | const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt, |
3379 | int comp_idx, const char *tag_key) |
3380 | { |
3381 | const char *value = NULL; |
3382 | const struct btf_type *t; |
3383 | int len, id; |
3384 | |
3385 | id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key, 0); |
3386 | if (id < 0) |
3387 | return ERR_PTR(id); |
3388 | |
3389 | t = btf_type_by_id(btf, id); |
3390 | len = strlen(tag_key); |
3391 | value = __btf_name_by_offset(btf, t->name_off) + len; |
3392 | |
3393 | /* Prevent duplicate entries for same type */ |
3394 | id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key, id); |
3395 | if (id >= 0) |
3396 | return ERR_PTR(-EEXIST); |
3397 | |
3398 | return value; |
3399 | } |
3400 | |
3401 | static int |
3402 | btf_find_graph_root(const struct btf *btf, const struct btf_type *pt, |
3403 | const struct btf_type *t, int comp_idx, u32 off, |
3404 | int sz, struct btf_field_info *info, |
3405 | enum btf_field_type head_type) |
3406 | { |
3407 | const char *node_field_name; |
3408 | const char *value_type; |
3409 | s32 id; |
3410 | |
3411 | if (!__btf_type_is_struct(t)) |
3412 | return BTF_FIELD_IGNORE; |
3413 | if (t->size != sz) |
3414 | return BTF_FIELD_IGNORE; |
3415 | value_type = btf_find_decl_tag_value(btf, pt, comp_idx, "contains:"); |
3416 | if (IS_ERR(value_type)) |
3417 | return -EINVAL; |
3418 | node_field_name = strstr(value_type, ":"); |
3419 | if (!node_field_name) |
3420 | return -EINVAL; |
3421 | value_type = kstrndup(value_type, node_field_name - value_type, GFP_KERNEL | __GFP_NOWARN); |
3422 | if (!value_type) |
3423 | return -ENOMEM; |
3424 | id = btf_find_by_name_kind(btf, value_type, BTF_KIND_STRUCT); |
3425 | kfree(value_type); |
3426 | if (id < 0) |
3427 | return id; |
3428 | node_field_name++; |
3429 | if (str_is_empty(node_field_name)) |
3430 | return -EINVAL; |
3431 | info->type = head_type; |
3432 | info->off = off; |
3433 | info->graph_root.value_btf_id = id; |
3434 | info->graph_root.node_name = node_field_name; |
3435 | return BTF_FIELD_FOUND; |
3436 | } |
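
/* Illustrative sketch (not compiled here; the __contains() macro is the one
 * used by the BPF selftests' bpf_experimental.h, assumed to expand to
 * __attribute__((btf_decl_tag("contains:" #name ":" #node)))): for
 *
 *	struct elem {
 *		struct bpf_list_node node;
 *	};
 *	struct map_value {
 *		struct bpf_spin_lock lock;
 *		struct bpf_list_head head __contains(elem, node);
 *	};
 *
 * btf_find_graph_root() reads the "contains:elem:node" tag value, resolves
 * "elem" to its struct BTF id and records "node" as the node field name.
 */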
3437 | |
3438 | #define field_mask_test_name(field_type, field_type_str) \ |
3439 | if (field_mask & field_type && !strcmp(name, field_type_str)) { \ |
3440 | type = field_type; \ |
3441 | goto end; \ |
3442 | } |
3443 | |
3444 | static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask, |
3445 | int *align, int *sz) |
3446 | { |
3447 | int type = 0; |
3448 | |
3449 | if (field_mask & BPF_SPIN_LOCK) { |
3450 | if (!strcmp(name, "bpf_spin_lock" )) { |
3451 | if (*seen_mask & BPF_SPIN_LOCK) |
3452 | return -E2BIG; |
3453 | *seen_mask |= BPF_SPIN_LOCK; |
3454 | type = BPF_SPIN_LOCK; |
3455 | goto end; |
3456 | } |
3457 | } |
3458 | if (field_mask & BPF_TIMER) { |
3459 | if (!strcmp(name, "bpf_timer" )) { |
3460 | if (*seen_mask & BPF_TIMER) |
3461 | return -E2BIG; |
3462 | *seen_mask |= BPF_TIMER; |
3463 | type = BPF_TIMER; |
3464 | goto end; |
3465 | } |
3466 | } |
3467 | field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head"); |
3468 | field_mask_test_name(BPF_LIST_NODE, "bpf_list_node"); |
3469 | field_mask_test_name(BPF_RB_ROOT, "bpf_rb_root"); |
3470 | field_mask_test_name(BPF_RB_NODE, "bpf_rb_node"); |
3471 | field_mask_test_name(BPF_REFCOUNT, "bpf_refcount"); |
3472 | |
3473 | /* Only return BPF_KPTR when all other types with matchable names fail */ |
3474 | if (field_mask & BPF_KPTR) { |
3475 | type = BPF_KPTR_REF; |
3476 | goto end; |
3477 | } |
3478 | return 0; |
3479 | end: |
3480 | *sz = btf_field_type_size(type); |
3481 | *align = btf_field_type_align(type); |
3482 | return type; |
3483 | } |
3484 | |
3485 | #undef field_mask_test_name |
3486 | |
3487 | static int btf_find_struct_field(const struct btf *btf, |
3488 | const struct btf_type *t, u32 field_mask, |
3489 | struct btf_field_info *info, int info_cnt) |
3490 | { |
3491 | int ret, idx = 0, align, sz, field_type; |
3492 | const struct btf_member *member; |
3493 | struct btf_field_info tmp; |
3494 | u32 i, off, seen_mask = 0; |
3495 | |
3496 | for_each_member(i, t, member) { |
3497 | const struct btf_type *member_type = btf_type_by_id(btf, |
3498 | member->type); |
3499 | |
3500 | field_type = btf_get_field_type(__btf_name_by_offset(btf, member_type->name_off), |
3501 | field_mask, &seen_mask, &align, &sz); |
3502 | if (field_type == 0) |
3503 | continue; |
3504 | if (field_type < 0) |
3505 | return field_type; |
3506 | |
3507 | off = __btf_member_bit_offset(t, member); |
3508 | if (off % 8) |
3509 | /* valid C code cannot generate such BTF */ |
3510 | return -EINVAL; |
3511 | off /= 8; |
3512 | if (off % align) |
3513 | continue; |
3514 | |
3515 | switch (field_type) { |
3516 | case BPF_SPIN_LOCK: |
3517 | case BPF_TIMER: |
3518 | case BPF_LIST_NODE: |
3519 | case BPF_RB_NODE: |
3520 | case BPF_REFCOUNT: |
3521 | ret = btf_find_struct(btf, member_type, off, sz, field_type, |
3522 | idx < info_cnt ? &info[idx] : &tmp); |
3523 | if (ret < 0) |
3524 | return ret; |
3525 | break; |
3526 | case BPF_KPTR_UNREF: |
3527 | case BPF_KPTR_REF: |
3528 | case BPF_KPTR_PERCPU: |
3529 | ret = btf_find_kptr(btf, member_type, off, sz, |
3530 | idx < info_cnt ? &info[idx] : &tmp); |
3531 | if (ret < 0) |
3532 | return ret; |
3533 | break; |
3534 | case BPF_LIST_HEAD: |
3535 | case BPF_RB_ROOT: |
3536 | ret = btf_find_graph_root(btf, t, member_type, |
3537 | i, off, sz, |
3538 | idx < info_cnt ? &info[idx] : &tmp, |
3539 | field_type); |
3540 | if (ret < 0) |
3541 | return ret; |
3542 | break; |
3543 | default: |
3544 | return -EFAULT; |
3545 | } |
3546 | |
3547 | if (ret == BTF_FIELD_IGNORE) |
3548 | continue; |
3549 | if (idx >= info_cnt) |
3550 | return -E2BIG; |
3551 | ++idx; |
3552 | } |
3553 | return idx; |
3554 | } |
3555 | |
3556 | static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t, |
3557 | u32 field_mask, struct btf_field_info *info, |
3558 | int info_cnt) |
3559 | { |
3560 | int ret, idx = 0, align, sz, field_type; |
3561 | const struct btf_var_secinfo *vsi; |
3562 | struct btf_field_info tmp; |
3563 | u32 i, off, seen_mask = 0; |
3564 | |
3565 | for_each_vsi(i, t, vsi) { |
3566 | const struct btf_type *var = btf_type_by_id(btf, vsi->type); |
3567 | const struct btf_type *var_type = btf_type_by_id(btf, var->type); |
3568 | |
3569 | field_type = btf_get_field_type(__btf_name_by_offset(btf, var_type->name_off), |
3570 | field_mask, &seen_mask, &align, &sz); |
3571 | if (field_type == 0) |
3572 | continue; |
3573 | if (field_type < 0) |
3574 | return field_type; |
3575 | |
3576 | off = vsi->offset; |
3577 | if (vsi->size != sz) |
3578 | continue; |
3579 | if (off % align) |
3580 | continue; |
3581 | |
3582 | switch (field_type) { |
3583 | case BPF_SPIN_LOCK: |
3584 | case BPF_TIMER: |
3585 | case BPF_LIST_NODE: |
3586 | case BPF_RB_NODE: |
3587 | case BPF_REFCOUNT: |
3588 | ret = btf_find_struct(btf, var_type, off, sz, field_type, |
3589 | idx < info_cnt ? &info[idx] : &tmp); |
3590 | if (ret < 0) |
3591 | return ret; |
3592 | break; |
3593 | case BPF_KPTR_UNREF: |
3594 | case BPF_KPTR_REF: |
3595 | case BPF_KPTR_PERCPU: |
3596 | ret = btf_find_kptr(btf, var_type, off, sz, |
3597 | idx < info_cnt ? &info[idx] : &tmp); |
3598 | if (ret < 0) |
3599 | return ret; |
3600 | break; |
3601 | case BPF_LIST_HEAD: |
3602 | case BPF_RB_ROOT: |
3603 | ret = btf_find_graph_root(btf, var, var_type, |
3604 | -1, off, sz, |
3605 | idx < info_cnt ? &info[idx] : &tmp, |
3606 | field_type); |
3607 | if (ret < 0) |
3608 | return ret; |
3609 | break; |
3610 | default: |
3611 | return -EFAULT; |
3612 | } |
3613 | |
3614 | if (ret == BTF_FIELD_IGNORE) |
3615 | continue; |
3616 | if (idx >= info_cnt) |
3617 | return -E2BIG; |
3618 | ++idx; |
3619 | } |
3620 | return idx; |
3621 | } |
3622 | |
3623 | static int btf_find_field(const struct btf *btf, const struct btf_type *t, |
3624 | u32 field_mask, struct btf_field_info *info, |
3625 | int info_cnt) |
3626 | { |
3627 | if (__btf_type_is_struct(t)) |
3628 | return btf_find_struct_field(btf, t, field_mask, info, info_cnt); |
3629 | else if (btf_type_is_datasec(t)) |
3630 | return btf_find_datasec_var(btf, t, field_mask, info, info_cnt); |
3631 | return -EINVAL; |
3632 | } |
3633 | |
3634 | static int btf_parse_kptr(const struct btf *btf, struct btf_field *field, |
3635 | struct btf_field_info *info) |
3636 | { |
3637 | struct module *mod = NULL; |
3638 | const struct btf_type *t; |
3639 | /* If a matching btf type is found in kernel or module BTFs, kptr_ref |
3640 | * is that BTF, otherwise it's program BTF |
3641 | */ |
3642 | struct btf *kptr_btf; |
3643 | int ret; |
3644 | s32 id; |
3645 | |
3646 | /* Find type in map BTF, and use it to look up the matching type |
3647 | * in vmlinux or module BTFs, by name and kind. |
3648 | */ |
3649 | t = btf_type_by_id(btf, info->kptr.type_id); |
3650 | id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info), |
3651 | &kptr_btf); |
3652 | if (id == -ENOENT) { |
3653 | /* btf_parse_kptr should only be called w/ btf = program BTF */ |
3654 | WARN_ON_ONCE(btf_is_kernel(btf)); |
3655 | |
3656 | /* Type exists only in program BTF. Assume that it's a MEM_ALLOC |
3657 | * kptr allocated via bpf_obj_new |
3658 | */ |
3659 | field->kptr.dtor = NULL; |
3660 | id = info->kptr.type_id; |
3661 | kptr_btf = (struct btf *)btf; |
3662 | btf_get(kptr_btf); |
3663 | goto found_dtor; |
3664 | } |
3665 | if (id < 0) |
3666 | return id; |
3667 | |
3668 | /* Find and stash the function pointer for the destruction function that |
3669 | * needs to be eventually invoked from the map free path. |
3670 | */ |
3671 | if (info->type == BPF_KPTR_REF) { |
3672 | const struct btf_type *dtor_func; |
3673 | const char *dtor_func_name; |
3674 | unsigned long addr; |
3675 | s32 dtor_btf_id; |
3676 | |
3677 | /* This call also serves as a whitelist of allowed objects that |
3678 | * can be used as a referenced pointer and be stored in a map at |
3679 | * the same time. |
3680 | */ |
3681 | dtor_btf_id = btf_find_dtor_kfunc(kptr_btf, id); |
3682 | if (dtor_btf_id < 0) { |
3683 | ret = dtor_btf_id; |
3684 | goto end_btf; |
3685 | } |
3686 | |
3687 | dtor_func = btf_type_by_id(kptr_btf, dtor_btf_id); |
3688 | if (!dtor_func) { |
3689 | ret = -ENOENT; |
3690 | goto end_btf; |
3691 | } |
3692 | |
3693 | if (btf_is_module(kptr_btf)) { |
3694 | mod = btf_try_get_module(kptr_btf); |
3695 | if (!mod) { |
3696 | ret = -ENXIO; |
3697 | goto end_btf; |
3698 | } |
3699 | } |
3700 | |
3701 | /* We already verified dtor_func to be btf_type_is_func |
3702 | * in register_btf_id_dtor_kfuncs. |
3703 | */ |
3704 | dtor_func_name = __btf_name_by_offset(kptr_btf, dtor_func->name_off); |
3705 | addr = kallsyms_lookup_name(dtor_func_name); |
3706 | if (!addr) { |
3707 | ret = -EINVAL; |
3708 | goto end_mod; |
3709 | } |
3710 | field->kptr.dtor = (void *)addr; |
3711 | } |
3712 | |
3713 | found_dtor: |
3714 | field->kptr.btf_id = id; |
3715 | field->kptr.btf = kptr_btf; |
3716 | field->kptr.module = mod; |
3717 | return 0; |
3718 | end_mod: |
3719 | module_put(mod); |
3720 | end_btf: |
3721 | btf_put(kptr_btf); |
3722 | return ret; |
3723 | } |
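
/* Rough sketch of the destructor registration this function relies on
 * (assumed shape, not a definitive example; my_obj and my_obj_release are
 * hypothetical names): a kernel module that wants its type storable as a
 * referenced kptr registers a dtor kfunc roughly like
 *
 *	BTF_ID_LIST(my_dtor_ids)
 *	BTF_ID(struct, my_obj)
 *	BTF_ID(func, my_obj_release)
 *
 *	static const struct btf_id_dtor_kfunc my_dtors[] = {
 *		{ .btf_id = my_dtor_ids[0], .kfunc_btf_id = my_dtor_ids[1] },
 *	};
 *	err = register_btf_id_dtor_kfuncs(my_dtors, ARRAY_SIZE(my_dtors), THIS_MODULE);
 *
 * btf_find_dtor_kfunc() above maps the kptr's BTF id to that registration,
 * and kallsyms_lookup_name() resolves the dtor address used on map free.
 */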
3724 | |
3725 | static int btf_parse_graph_root(const struct btf *btf, |
3726 | struct btf_field *field, |
3727 | struct btf_field_info *info, |
3728 | const char *node_type_name, |
3729 | size_t node_type_align) |
3730 | { |
3731 | const struct btf_type *t, *n = NULL; |
3732 | const struct btf_member *member; |
3733 | u32 offset; |
3734 | int i; |
3735 | |
3736 | t = btf_type_by_id(btf, info->graph_root.value_btf_id); |
3737 | /* We've already checked that value_btf_id is a struct type. We |
3738 | * just need to figure out the offset of the list_node, and |
3739 | * verify its type. |
3740 | */ |
3741 | for_each_member(i, t, member) { |
3742 | if (strcmp(info->graph_root.node_name, |
3743 | __btf_name_by_offset(btf, member->name_off))) |
3744 | continue; |
3745 | /* Invalid BTF, two members with same name */ |
3746 | if (n) |
3747 | return -EINVAL; |
3748 | n = btf_type_by_id(btf, member->type); |
3749 | if (!__btf_type_is_struct(n)) |
3750 | return -EINVAL; |
3751 | if (strcmp(node_type_name, __btf_name_by_offset(btf, n->name_off))) |
3752 | return -EINVAL; |
3753 | offset = __btf_member_bit_offset(n, member); |
3754 | if (offset % 8) |
3755 | return -EINVAL; |
3756 | offset /= 8; |
3757 | if (offset % node_type_align) |
3758 | return -EINVAL; |
3759 | |
3760 | field->graph_root.btf = (struct btf *)btf; |
3761 | field->graph_root.value_btf_id = info->graph_root.value_btf_id; |
3762 | field->graph_root.node_offset = offset; |
3763 | } |
3764 | if (!n) |
3765 | return -ENOENT; |
3766 | return 0; |
3767 | } |
3768 | |
3769 | static int btf_parse_list_head(const struct btf *btf, struct btf_field *field, |
3770 | struct btf_field_info *info) |
3771 | { |
3772 | return btf_parse_graph_root(btf, field, info, "bpf_list_node", |
3773 | __alignof__(struct bpf_list_node)); |
3774 | } |
3775 | |
3776 | static int btf_parse_rb_root(const struct btf *btf, struct btf_field *field, |
3777 | struct btf_field_info *info) |
3778 | { |
3779 | return btf_parse_graph_root(btf, field, info, "bpf_rb_node", |
3780 | __alignof__(struct bpf_rb_node)); |
3781 | } |
3782 | |
3783 | static int btf_field_cmp(const void *_a, const void *_b, const void *priv) |
3784 | { |
3785 | const struct btf_field *a = (const struct btf_field *)_a; |
3786 | const struct btf_field *b = (const struct btf_field *)_b; |
3787 | |
3788 | if (a->offset < b->offset) |
3789 | return -1; |
3790 | else if (a->offset > b->offset) |
3791 | return 1; |
3792 | return 0; |
3793 | } |
3794 | |
3795 | struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t, |
3796 | u32 field_mask, u32 value_size) |
3797 | { |
3798 | struct btf_field_info info_arr[BTF_FIELDS_MAX]; |
3799 | u32 next_off = 0, field_type_size; |
3800 | struct btf_record *rec; |
3801 | int ret, i, cnt; |
3802 | |
3803 | ret = btf_find_field(btf, t, field_mask, info_arr, ARRAY_SIZE(info_arr)); |
3804 | if (ret < 0) |
3805 | return ERR_PTR(ret); |
3806 | if (!ret) |
3807 | return NULL; |
3808 | |
3809 | cnt = ret; |
3810 | /* This needs to be kzalloc to zero out padding and unused fields, see |
3811 | * comment in btf_record_equal. |
3812 | */ |
3813 | rec = kzalloc(offsetof(struct btf_record, fields[cnt]), GFP_KERNEL | __GFP_NOWARN); |
3814 | if (!rec) |
3815 | return ERR_PTR(-ENOMEM); |
3816 | |
3817 | rec->spin_lock_off = -EINVAL; |
3818 | rec->timer_off = -EINVAL; |
3819 | rec->refcount_off = -EINVAL; |
3820 | for (i = 0; i < cnt; i++) { |
3821 | field_type_size = btf_field_type_size(info_arr[i].type); |
3822 | if (info_arr[i].off + field_type_size > value_size) { |
3823 | WARN_ONCE(1, "verifier bug off %d size %d", info_arr[i].off, value_size); |
3824 | ret = -EFAULT; |
3825 | goto end; |
3826 | } |
3827 | if (info_arr[i].off < next_off) { |
3828 | ret = -EEXIST; |
3829 | goto end; |
3830 | } |
3831 | next_off = info_arr[i].off + field_type_size; |
3832 | |
3833 | rec->field_mask |= info_arr[i].type; |
3834 | rec->fields[i].offset = info_arr[i].off; |
3835 | rec->fields[i].type = info_arr[i].type; |
3836 | rec->fields[i].size = field_type_size; |
3837 | |
3838 | switch (info_arr[i].type) { |
3839 | case BPF_SPIN_LOCK: |
3840 | WARN_ON_ONCE(rec->spin_lock_off >= 0); |
3841 | /* Cache offset for faster lookup at runtime */ |
3842 | rec->spin_lock_off = rec->fields[i].offset; |
3843 | break; |
3844 | case BPF_TIMER: |
3845 | WARN_ON_ONCE(rec->timer_off >= 0); |
3846 | /* Cache offset for faster lookup at runtime */ |
3847 | rec->timer_off = rec->fields[i].offset; |
3848 | break; |
3849 | case BPF_REFCOUNT: |
3850 | WARN_ON_ONCE(rec->refcount_off >= 0); |
3851 | /* Cache offset for faster lookup at runtime */ |
3852 | rec->refcount_off = rec->fields[i].offset; |
3853 | break; |
3854 | case BPF_KPTR_UNREF: |
3855 | case BPF_KPTR_REF: |
3856 | case BPF_KPTR_PERCPU: |
3857 | ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]); |
3858 | if (ret < 0) |
3859 | goto end; |
3860 | break; |
3861 | case BPF_LIST_HEAD: |
3862 | ret = btf_parse_list_head(btf, &rec->fields[i], &info_arr[i]); |
3863 | if (ret < 0) |
3864 | goto end; |
3865 | break; |
3866 | case BPF_RB_ROOT: |
3867 | ret = btf_parse_rb_root(btf, &rec->fields[i], &info_arr[i]); |
3868 | if (ret < 0) |
3869 | goto end; |
3870 | break; |
3871 | case BPF_LIST_NODE: |
3872 | case BPF_RB_NODE: |
3873 | break; |
3874 | default: |
3875 | ret = -EFAULT; |
3876 | goto end; |
3877 | } |
3878 | rec->cnt++; |
3879 | } |
3880 | |
3881 | /* bpf_{list_head, rb_node} require bpf_spin_lock */ |
3882 | if ((btf_record_has_field(rec, BPF_LIST_HEAD) || |
3883 | btf_record_has_field(rec, BPF_RB_ROOT)) && rec->spin_lock_off < 0) { |
3884 | ret = -EINVAL; |
3885 | goto end; |
3886 | } |
3887 | |
3888 | if (rec->refcount_off < 0 && |
3889 | btf_record_has_field(rec, BPF_LIST_NODE) && |
3890 | btf_record_has_field(rec, BPF_RB_NODE)) { |
3891 | ret = -EINVAL; |
3892 | goto end; |
3893 | } |
3894 | |
3895 | sort_r(rec->fields, rec->cnt, sizeof(struct btf_field), btf_field_cmp, |
3896 | NULL, rec); |
3897 | |
3898 | return rec; |
3899 | end: |
3900 | btf_record_free(rec); |
3901 | return ERR_PTR(ret); |
3902 | } |
3903 | |
3904 | int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec) |
3905 | { |
3906 | int i; |
3907 | |
3908 | /* There are three types that signify ownership of some other type: |
3909 | * kptr_ref, bpf_list_head, bpf_rb_root. |
3910 | * kptr_ref only supports storing kernel types, which can't store |
3911 | * references to program allocated local types. |
3912 | * |
3913 | * Hence we only need to ensure that bpf_{list_head,rb_root} ownership |
3914 | * does not form cycles. |
3915 | */ |
3916 | if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & BPF_GRAPH_ROOT)) |
3917 | return 0; |
3918 | for (i = 0; i < rec->cnt; i++) { |
3919 | struct btf_struct_meta *meta; |
3920 | u32 btf_id; |
3921 | |
3922 | if (!(rec->fields[i].type & BPF_GRAPH_ROOT)) |
3923 | continue; |
3924 | btf_id = rec->fields[i].graph_root.value_btf_id; |
3925 | meta = btf_find_struct_meta(btf, btf_id); |
3926 | if (!meta) |
3927 | return -EFAULT; |
3928 | rec->fields[i].graph_root.value_rec = meta->record; |
3929 | |
3930 | /* We need to set value_rec for all root types, but no need |
3931 | * to check ownership cycle for a type unless it's also a |
3932 | * node type. |
3933 | */ |
3934 | if (!(rec->field_mask & BPF_GRAPH_NODE)) |
3935 | continue; |
3936 | |
3937 | /* We need to ensure ownership acyclicity among all types. The |
3938 | * proper way to do it would be to topologically sort all BTF |
3939 | * IDs based on the ownership edges, since there can be multiple |
3940 | * bpf_{list_head,rb_node} in a type. Instead, we use the |
3941 | * following reasoning: |
3942 | * |
3943 | * - A type can only be owned by another type in user BTF if it |
3944 | * has a bpf_{list,rb}_node. Let's call these node types. |
3945 | * - A type can only _own_ another type in user BTF if it has a |
3946 | * bpf_{list_head,rb_root}. Let's call these root types. |
3947 | * |
3948 | * We ensure that if a type is both a root and node, its |
3949 | * element types cannot be root types. |
3950 | * |
3951 | * To ensure acyclicity: |
3952 | * |
3953 | * When A is a root type but not a node, its ownership |
3954 | * chain can be: |
3955 | * A -> B -> C |
3956 | * Where: |
3957 | * - A is only a root, e.g. has bpf_rb_root. |
3958 | * - B is both a root and node, e.g. has bpf_rb_node and |
3959 | * bpf_list_head. |
3960 | * - C is only a node, e.g. has bpf_list_node |
3961 | * |
3962 | * When A is both a root and node, some other type already |
3963 | * owns it in the BTF domain, hence it cannot own |
3964 | * another root type through any of the ownership edges. |
3965 | * A -> B |
3966 | * Where: |
3967 | * - A is both a root and node. |
3968 | * - B is only a node. |
3969 | */ |
3970 | if (meta->record->field_mask & BPF_GRAPH_ROOT) |
3971 | return -ELOOP; |
3972 | } |
3973 | return 0; |
3974 | } |
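
/* Example of the cycle this rejects (a sketch, using the same assumed
 * __contains() macro noted earlier): a type that is both a root and a node
 * must not own another root, so a self-owning list fails with -ELOOP:
 *
 *	struct self {
 *		struct bpf_spin_lock lock;
 *		struct bpf_list_head head __contains(self, node);
 *		struct bpf_list_node node;
 *	};
 *
 * Here "self" is a node (it has bpf_list_node) whose element type is itself
 * a root (it has bpf_list_head), which the loop above detects through the
 * element's btf_record stashed in value_rec.
 */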
3975 | |
3976 | static void __btf_struct_show(const struct btf *btf, const struct btf_type *t, |
3977 | u32 type_id, void *data, u8 bits_offset, |
3978 | struct btf_show *show) |
3979 | { |
3980 | const struct btf_member *member; |
3981 | void *safe_data; |
3982 | u32 i; |
3983 | |
3984 | safe_data = btf_show_start_struct_type(show, t, type_id, data); |
3985 | if (!safe_data) |
3986 | return; |
3987 | |
3988 | for_each_member(i, t, member) { |
3989 | const struct btf_type *member_type = btf_type_by_id(btf, |
3990 | member->type); |
3991 | const struct btf_kind_operations *ops; |
3992 | u32 member_offset, bitfield_size; |
3993 | u32 bytes_offset; |
3994 | u8 bits8_offset; |
3995 | |
3996 | btf_show_start_member(show, member); |
3997 | |
3998 | member_offset = __btf_member_bit_offset(t, member); |
3999 | bitfield_size = __btf_member_bitfield_size(t, member); |
4000 | bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); |
4001 | bits8_offset = BITS_PER_BYTE_MASKED(member_offset); |
4002 | if (bitfield_size) { |
4003 | safe_data = btf_show_start_type(show, member_type, |
4004 | member->type, |
4005 | data + bytes_offset); |
4006 | if (safe_data) |
4007 | btf_bitfield_show(safe_data, |
4008 | bits8_offset, |
4009 | bitfield_size, show); |
4010 | btf_show_end_type(show); |
4011 | } else { |
4012 | ops = btf_type_ops(member_type); |
4013 | ops->show(btf, member_type, member->type, |
4014 | data + bytes_offset, bits8_offset, show); |
4015 | } |
4016 | |
4017 | btf_show_end_member(show); |
4018 | } |
4019 | |
4020 | btf_show_end_struct_type(show); |
4021 | } |
4022 | |
4023 | static void btf_struct_show(const struct btf *btf, const struct btf_type *t, |
4024 | u32 type_id, void *data, u8 bits_offset, |
4025 | struct btf_show *show) |
4026 | { |
4027 | const struct btf_member *m = show->state.member; |
4028 | |
4029 | /* |
4030 | * First check if any members would be shown (are non-zero). |
4031 | * See comments above "struct btf_show" definition for more |
4032 | * details on how this works at a high-level. |
4033 | */ |
4034 | if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) { |
4035 | if (!show->state.depth_check) { |
4036 | show->state.depth_check = show->state.depth + 1; |
4037 | show->state.depth_to_show = 0; |
4038 | } |
4039 | __btf_struct_show(btf, t, type_id, data, bits_offset, show); |
4040 | /* Restore saved member data here */ |
4041 | show->state.member = m; |
4042 | if (show->state.depth_check != show->state.depth + 1) |
4043 | return; |
4044 | show->state.depth_check = 0; |
4045 | |
4046 | if (show->state.depth_to_show <= show->state.depth) |
4047 | return; |
4048 | /* |
4049 | * Reaching here indicates we have recursed and found |
4050 | * non-zero child values. |
4051 | */ |
4052 | } |
4053 | |
4054 | __btf_struct_show(btf, t, type_id, data, bits_offset, show); |
4055 | } |
4056 | |
4057 | static struct btf_kind_operations struct_ops = { |
4058 | .check_meta = btf_struct_check_meta, |
4059 | .resolve = btf_struct_resolve, |
4060 | .check_member = btf_struct_check_member, |
4061 | .check_kflag_member = btf_generic_check_kflag_member, |
4062 | .log_details = btf_struct_log, |
4063 | .show = btf_struct_show, |
4064 | }; |
4065 | |
4066 | static int btf_enum_check_member(struct btf_verifier_env *env, |
4067 | const struct btf_type *struct_type, |
4068 | const struct btf_member *member, |
4069 | const struct btf_type *member_type) |
4070 | { |
4071 | u32 struct_bits_off = member->offset; |
4072 | u32 struct_size, bytes_offset; |
4073 | |
4074 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
4075 | btf_verifier_log_member(env, struct_type, member, |
4076 | "Member is not byte aligned"); |
4077 | return -EINVAL; |
4078 | } |
4079 | |
4080 | struct_size = struct_type->size; |
4081 | bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); |
4082 | if (struct_size - bytes_offset < member_type->size) { |
4083 | btf_verifier_log_member(env, struct_type, member, |
4084 | "Member exceeds struct_size"); |
4085 | return -EINVAL; |
4086 | } |
4087 | |
4088 | return 0; |
4089 | } |
4090 | |
4091 | static int btf_enum_check_kflag_member(struct btf_verifier_env *env, |
4092 | const struct btf_type *struct_type, |
4093 | const struct btf_member *member, |
4094 | const struct btf_type *member_type) |
4095 | { |
4096 | u32 struct_bits_off, nr_bits, bytes_end, struct_size; |
4097 | u32 int_bitsize = sizeof(int) * BITS_PER_BYTE; |
4098 | |
4099 | struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); |
4100 | nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); |
4101 | if (!nr_bits) { |
4102 | if (BITS_PER_BYTE_MASKED(struct_bits_off)) { |
4103 | btf_verifier_log_member(env, struct_type, member, |
4104 | "Member is not byte aligned"); |
4105 | return -EINVAL; |
4106 | } |
4107 | |
4108 | nr_bits = int_bitsize; |
4109 | } else if (nr_bits > int_bitsize) { |
4110 | btf_verifier_log_member(env, struct_type, member, |
4111 | "Invalid member bitfield_size"); |
4112 | return -EINVAL; |
4113 | } |
4114 | |
4115 | struct_size = struct_type->size; |
4116 | bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits); |
4117 | if (struct_size < bytes_end) { |
4118 | btf_verifier_log_member(env, struct_type, member, |
4119 | "Member exceeds struct_size"); |
4120 | return -EINVAL; |
4121 | } |
4122 | |
4123 | return 0; |
4124 | } |
4125 | |
4126 | static s32 btf_enum_check_meta(struct btf_verifier_env *env, |
4127 | const struct btf_type *t, |
4128 | u32 meta_left) |
4129 | { |
4130 | const struct btf_enum *enums = btf_type_enum(t); |
4131 | struct btf *btf = env->btf; |
4132 | const char *fmt_str; |
4133 | u16 i, nr_enums; |
4134 | u32 meta_needed; |
4135 | |
4136 | nr_enums = btf_type_vlen(t); |
4137 | meta_needed = nr_enums * sizeof(*enums); |
4138 | |
4139 | if (meta_left < meta_needed) { |
4140 | btf_verifier_log_basic(env, t, |
4141 | "meta_left:%u meta_needed:%u" , |
4142 | meta_left, meta_needed); |
4143 | return -EINVAL; |
4144 | } |
4145 | |
4146 | if (t->size > 8 || !is_power_of_2(t->size)) { |
4147 | btf_verifier_log_type(env, t, "Unexpected size"); |
4148 | return -EINVAL; |
4149 | } |
4150 | |
4151 | /* enum type either no name or a valid one */ |
4152 | if (t->name_off && |
4153 | !btf_name_valid_identifier(env->btf, t->name_off)) { |
4154 | btf_verifier_log_type(env, t, "Invalid name"); |
4155 | return -EINVAL; |
4156 | } |
4157 | |
4158 | btf_verifier_log_type(env, t, NULL); |
4159 | |
4160 | for (i = 0; i < nr_enums; i++) { |
4161 | if (!btf_name_offset_valid(btf, enums[i].name_off)) { |
4162 | btf_verifier_log(env, "\tInvalid name_offset:%u", |
4163 | enums[i].name_off); |
4164 | return -EINVAL; |
4165 | } |
4166 | |
4167 | /* enum member must have a valid name */ |
4168 | if (!enums[i].name_off || |
4169 | !btf_name_valid_identifier(btf, enums[i].name_off)) { |
4170 | btf_verifier_log_type(env, t, "Invalid name"); |
4171 | return -EINVAL; |
4172 | } |
4173 | |
4174 | if (env->log.level == BPF_LOG_KERNEL) |
4175 | continue; |
4176 | fmt_str = btf_type_kflag(t) ? "\t%s val=%d\n" : "\t%s val=%u\n"; |
4177 | btf_verifier_log(env, fmt_str, |
4178 | __btf_name_by_offset(btf, enums[i].name_off), |
4179 | enums[i].val); |
4180 | } |
4181 | |
4182 | return meta_needed; |
4183 | } |
4184 | |
4185 | static void btf_enum_log(struct btf_verifier_env *env, |
4186 | const struct btf_type *t) |
4187 | { |
4188 | btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); |
4189 | } |
4190 | |
4191 | static void btf_enum_show(const struct btf *btf, const struct btf_type *t, |
4192 | u32 type_id, void *data, u8 bits_offset, |
4193 | struct btf_show *show) |
4194 | { |
4195 | const struct btf_enum *enums = btf_type_enum(t); |
4196 | u32 i, nr_enums = btf_type_vlen(t); |
4197 | void *safe_data; |
4198 | int v; |
4199 | |
4200 | safe_data = btf_show_start_type(show, t, type_id, data); |
4201 | if (!safe_data) |
4202 | return; |
4203 | |
4204 | v = *(int *)safe_data; |
4205 | |
4206 | for (i = 0; i < nr_enums; i++) { |
4207 | if (v != enums[i].val) |
4208 | continue; |
4209 | |
4210 | btf_show_type_value(show, "%s" , |
4211 | __btf_name_by_offset(btf, |
4212 | enums[i].name_off)); |
4213 | |
4214 | btf_show_end_type(show); |
4215 | return; |
4216 | } |
4217 | |
4218 | if (btf_type_kflag(t)) |
4219 | btf_show_type_value(show, "%d" , v); |
4220 | else |
4221 | btf_show_type_value(show, "%u" , v); |
4222 | btf_show_end_type(show); |
4223 | } |
4224 | |
4225 | static struct btf_kind_operations enum_ops = { |
4226 | .check_meta = btf_enum_check_meta, |
4227 | .resolve = btf_df_resolve, |
4228 | .check_member = btf_enum_check_member, |
4229 | .check_kflag_member = btf_enum_check_kflag_member, |
4230 | .log_details = btf_enum_log, |
4231 | .show = btf_enum_show, |
4232 | }; |
4233 | |
4234 | static s32 btf_enum64_check_meta(struct btf_verifier_env *env, |
4235 | const struct btf_type *t, |
4236 | u32 meta_left) |
4237 | { |
4238 | const struct btf_enum64 *enums = btf_type_enum64(t); |
4239 | struct btf *btf = env->btf; |
4240 | const char *fmt_str; |
4241 | u16 i, nr_enums; |
4242 | u32 meta_needed; |
4243 | |
4244 | nr_enums = btf_type_vlen(t); |
4245 | meta_needed = nr_enums * sizeof(*enums); |
4246 | |
4247 | if (meta_left < meta_needed) { |
4248 | btf_verifier_log_basic(env, t, |
4249 | "meta_left:%u meta_needed:%u" , |
4250 | meta_left, meta_needed); |
4251 | return -EINVAL; |
4252 | } |
4253 | |
4254 | if (t->size > 8 || !is_power_of_2(t->size)) { |
4255 | btf_verifier_log_type(env, t, "Unexpected size"); |
4256 | return -EINVAL; |
4257 | } |
4258 | |
4259 | /* enum type either no name or a valid one */ |
4260 | if (t->name_off && |
4261 | !btf_name_valid_identifier(env->btf, t->name_off)) { |
4262 | btf_verifier_log_type(env, t, "Invalid name"); |
4263 | return -EINVAL; |
4264 | } |
4265 | |
4266 | btf_verifier_log_type(env, t, NULL); |
4267 | |
4268 | for (i = 0; i < nr_enums; i++) { |
4269 | if (!btf_name_offset_valid(btf, enums[i].name_off)) { |
4270 | btf_verifier_log(env, "\tInvalid name_offset:%u", |
4271 | enums[i].name_off); |
4272 | return -EINVAL; |
4273 | } |
4274 | |
4275 | /* enum member must have a valid name */ |
4276 | if (!enums[i].name_off || |
4277 | !btf_name_valid_identifier(btf, enums[i].name_off)) { |
4278 | btf_verifier_log_type(env, t, "Invalid name"); |
4279 | return -EINVAL; |
4280 | } |
4281 | |
4282 | if (env->log.level == BPF_LOG_KERNEL) |
4283 | continue; |
4284 | |
4285 | fmt_str = btf_type_kflag(t) ? "\t%s val=%lld\n" : "\t%s val=%llu\n"; |
4286 | btf_verifier_log(env, fmt_str, |
4287 | __btf_name_by_offset(btf, enums[i].name_off), |
4288 | btf_enum64_value(enums + i)); |
4289 | } |
4290 | |
4291 | return meta_needed; |
4292 | } |
4293 | |
4294 | static void btf_enum64_show(const struct btf *btf, const struct btf_type *t, |
4295 | u32 type_id, void *data, u8 bits_offset, |
4296 | struct btf_show *show) |
4297 | { |
4298 | const struct btf_enum64 *enums = btf_type_enum64(t); |
4299 | u32 i, nr_enums = btf_type_vlen(t); |
4300 | void *safe_data; |
4301 | s64 v; |
4302 | |
4303 | safe_data = btf_show_start_type(show, t, type_id, data); |
4304 | if (!safe_data) |
4305 | return; |
4306 | |
4307 | v = *(u64 *)safe_data; |
4308 | |
4309 | for (i = 0; i < nr_enums; i++) { |
4310 | if (v != btf_enum64_value(enums + i)) |
4311 | continue; |
4312 | |
4313 | btf_show_type_value(show, "%s" , |
4314 | __btf_name_by_offset(btf, |
4315 | enums[i].name_off)); |
4316 | |
4317 | btf_show_end_type(show); |
4318 | return; |
4319 | } |
4320 | |
4321 | if (btf_type_kflag(t)) |
4322 | btf_show_type_value(show, "%lld" , v); |
4323 | else |
4324 | btf_show_type_value(show, "%llu" , v); |
4325 | btf_show_end_type(show); |
4326 | } |
4327 | |
4328 | static struct btf_kind_operations enum64_ops = { |
4329 | .check_meta = btf_enum64_check_meta, |
4330 | .resolve = btf_df_resolve, |
4331 | .check_member = btf_enum_check_member, |
4332 | .check_kflag_member = btf_enum_check_kflag_member, |
4333 | .log_details = btf_enum_log, |
4334 | .show = btf_enum64_show, |
4335 | }; |
4336 | |
4337 | static s32 btf_func_proto_check_meta(struct btf_verifier_env *env, |
4338 | const struct btf_type *t, |
4339 | u32 meta_left) |
4340 | { |
4341 | u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param); |
4342 | |
4343 | if (meta_left < meta_needed) { |
4344 | btf_verifier_log_basic(env, t, |
4345 | "meta_left:%u meta_needed:%u" , |
4346 | meta_left, meta_needed); |
4347 | return -EINVAL; |
4348 | } |
4349 | |
4350 | if (t->name_off) { |
4351 | btf_verifier_log_type(env, t, "Invalid name" ); |
4352 | return -EINVAL; |
4353 | } |
4354 | |
4355 | if (btf_type_kflag(t)) { |
4356 | btf_verifier_log_type(env, t, "Invalid btf_info kind_flag" ); |
4357 | return -EINVAL; |
4358 | } |
4359 | |
4360 | btf_verifier_log_type(env, t, NULL); |
4361 | |
4362 | return meta_needed; |
4363 | } |
4364 | |
4365 | static void btf_func_proto_log(struct btf_verifier_env *env, |
4366 | const struct btf_type *t) |
4367 | { |
4368 | const struct btf_param *args = (const struct btf_param *)(t + 1); |
4369 | u16 nr_args = btf_type_vlen(t), i; |
4370 | |
4371 | btf_verifier_log(env, "return=%u args=(", t->type); |
4372 | if (!nr_args) { |
4373 | btf_verifier_log(env, "void"); |
4374 | goto done; |
4375 | } |
4376 | |
4377 | if (nr_args == 1 && !args[0].type) { |
4378 | /* Only one vararg */ |
4379 | btf_verifier_log(env, "vararg"); |
4380 | goto done; |
4381 | } |
4382 | |
4383 | btf_verifier_log(env, "%u %s", args[0].type, |
4384 | __btf_name_by_offset(env->btf, |
4385 | args[0].name_off)); |
4386 | for (i = 1; i < nr_args - 1; i++) |
4387 | btf_verifier_log(env, ", %u %s", args[i].type, |
4388 | __btf_name_by_offset(env->btf, |
4389 | args[i].name_off)); |
4390 | |
4391 | if (nr_args > 1) { |
4392 | const struct btf_param *last_arg = &args[nr_args - 1]; |
4393 | |
4394 | if (last_arg->type) |
4395 | btf_verifier_log(env, ", %u %s", last_arg->type, |
4396 | __btf_name_by_offset(env->btf, |
4397 | last_arg->name_off)); |
4398 | else |
4399 | btf_verifier_log(env, ", vararg"); |
4400 | } |
4401 | |
4402 | done: |
4403 | btf_verifier_log(env, ")"); |
4404 | } |
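
/* Example of the resulting log line (a sketch with made-up type ids):
 * a prototype like "int (*)(int a, char *b)" is logged as
 *	return=1 args=(1 a, 3 b)
 * and a vararg prototype "int (*)(int a, ...)" as
 *	return=1 args=(1 a, vararg)
 */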
4405 | |
4406 | static struct btf_kind_operations func_proto_ops = { |
4407 | .check_meta = btf_func_proto_check_meta, |
4408 | .resolve = btf_df_resolve, |
4409 | /* |
4410 | * BTF_KIND_FUNC_PROTO cannot be directly referred by |
4411 | * a struct's member. |
4412 | * |
4413 | * It should be a function pointer instead. |
4414 | * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO) |
4415 | * |
4416 | * Hence, there is no btf_func_check_member(). |
4417 | */ |
4418 | .check_member = btf_df_check_member, |
4419 | .check_kflag_member = btf_df_check_kflag_member, |
4420 | .log_details = btf_func_proto_log, |
4421 | .show = btf_df_show, |
4422 | }; |
4423 | |
4424 | static s32 btf_func_check_meta(struct btf_verifier_env *env, |
4425 | const struct btf_type *t, |
4426 | u32 meta_left) |
4427 | { |
4428 | if (!t->name_off || |
4429 | !btf_name_valid_identifier(env->btf, t->name_off)) { |
4430 | btf_verifier_log_type(env, t, "Invalid name"); |
4431 | return -EINVAL; |
4432 | } |
4433 | |
4434 | if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) { |
4435 | btf_verifier_log_type(env, t, "Invalid func linkage" ); |
4436 | return -EINVAL; |
4437 | } |
4438 | |
4439 | if (btf_type_kflag(t)) { |
4440 | btf_verifier_log_type(env, t, "Invalid btf_info kind_flag" ); |
4441 | return -EINVAL; |
4442 | } |
4443 | |
4444 | btf_verifier_log_type(env, t, NULL); |
4445 | |
4446 | return 0; |
4447 | } |
4448 | |
4449 | static int btf_func_resolve(struct btf_verifier_env *env, |
4450 | const struct resolve_vertex *v) |
4451 | { |
4452 | const struct btf_type *t = v->t; |
4453 | u32 next_type_id = t->type; |
4454 | int err; |
4455 | |
4456 | err = btf_func_check(env, t); |
4457 | if (err) |
4458 | return err; |
4459 | |
4460 | env_stack_pop_resolved(env, next_type_id, 0); |
4461 | return 0; |
4462 | } |
4463 | |
4464 | static struct btf_kind_operations func_ops = { |
4465 | .check_meta = btf_func_check_meta, |
4466 | .resolve = btf_func_resolve, |
4467 | .check_member = btf_df_check_member, |
4468 | .check_kflag_member = btf_df_check_kflag_member, |
4469 | .log_details = btf_ref_type_log, |
4470 | .show = btf_df_show, |
4471 | }; |
4472 | |
4473 | static s32 btf_var_check_meta(struct btf_verifier_env *env, |
4474 | const struct btf_type *t, |
4475 | u32 meta_left) |
4476 | { |
4477 | const struct btf_var *var; |
4478 | u32 meta_needed = sizeof(*var); |
4479 | |
4480 | if (meta_left < meta_needed) { |
4481 | btf_verifier_log_basic(env, t, |
4482 | "meta_left:%u meta_needed:%u" , |
4483 | meta_left, meta_needed); |
4484 | return -EINVAL; |
4485 | } |
4486 | |
4487 | if (btf_type_vlen(t)) { |
4488 | btf_verifier_log_type(env, t, "vlen != 0" ); |
4489 | return -EINVAL; |
4490 | } |
4491 | |
4492 | if (btf_type_kflag(t)) { |
4493 | btf_verifier_log_type(env, t, "Invalid btf_info kind_flag" ); |
4494 | return -EINVAL; |
4495 | } |
4496 | |
4497 | if (!t->name_off || |
4498 | !__btf_name_valid(env->btf, t->name_off)) { |
4499 | btf_verifier_log_type(env, t, "Invalid name"); |
4500 | return -EINVAL; |
4501 | } |
4502 | |
4503 | /* A var cannot be in type void */ |
4504 | if (!t->type || !BTF_TYPE_ID_VALID(t->type)) { |
4505 | btf_verifier_log_type(env, t, "Invalid type_id" ); |
4506 | return -EINVAL; |
4507 | } |
4508 | |
4509 | var = btf_type_var(t); |
4510 | if (var->linkage != BTF_VAR_STATIC && |
4511 | var->linkage != BTF_VAR_GLOBAL_ALLOCATED) { |
4512 | btf_verifier_log_type(env, t, "Linkage not supported" ); |
4513 | return -EINVAL; |
4514 | } |
4515 | |
4516 | btf_verifier_log_type(env, t, NULL); |
4517 | |
4518 | return meta_needed; |
4519 | } |
4520 | |
4521 | static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t) |
4522 | { |
4523 | const struct btf_var *var = btf_type_var(t); |
4524 | |
4525 | btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage); |
4526 | } |
4527 | |
4528 | static const struct btf_kind_operations var_ops = { |
4529 | .check_meta = btf_var_check_meta, |
4530 | .resolve = btf_var_resolve, |
4531 | .check_member = btf_df_check_member, |
4532 | .check_kflag_member = btf_df_check_kflag_member, |
4533 | .log_details = btf_var_log, |
4534 | .show = btf_var_show, |
4535 | }; |
4536 | |
4537 | static s32 btf_datasec_check_meta(struct btf_verifier_env *env, |
4538 | const struct btf_type *t, |
4539 | u32 meta_left) |
4540 | { |
4541 | const struct btf_var_secinfo *vsi; |
4542 | u64 last_vsi_end_off = 0, sum = 0; |
4543 | u32 i, meta_needed; |
4544 | |
4545 | meta_needed = btf_type_vlen(t) * sizeof(*vsi); |
4546 | if (meta_left < meta_needed) { |
4547 | btf_verifier_log_basic(env, t, |
4548 | "meta_left:%u meta_needed:%u" , |
4549 | meta_left, meta_needed); |
4550 | return -EINVAL; |
4551 | } |
4552 | |
4553 | if (!t->size) { |
4554 | btf_verifier_log_type(env, t, "size == 0" ); |
4555 | return -EINVAL; |
4556 | } |
4557 | |
4558 | if (btf_type_kflag(t)) { |
4559 | btf_verifier_log_type(env, t, "Invalid btf_info kind_flag" ); |
4560 | return -EINVAL; |
4561 | } |
4562 | |
4563 | if (!t->name_off || |
4564 | !btf_name_valid_section(env->btf, t->name_off)) { |
4565 | btf_verifier_log_type(env, t, "Invalid name"); |
4566 | return -EINVAL; |
4567 | } |
4568 | |
4569 | btf_verifier_log_type(env, t, NULL); |
4570 | |
4571 | for_each_vsi(i, t, vsi) { |
4572 | /* A var cannot be in type void */ |
4573 | if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) { |
4574 | btf_verifier_log_vsi(env, t, vsi, |
4575 | "Invalid type_id"); |
4576 | return -EINVAL; |
4577 | } |
4578 | |
4579 | if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) { |
4580 | btf_verifier_log_vsi(env, t, vsi, |
4581 | "Invalid offset"); |
4582 | return -EINVAL; |
4583 | } |
4584 | |
4585 | if (!vsi->size || vsi->size > t->size) { |
4586 | btf_verifier_log_vsi(env, t, vsi, |
4587 | "Invalid size"); |
4588 | return -EINVAL; |
4589 | } |
4590 | |
4591 | last_vsi_end_off = vsi->offset + vsi->size; |
4592 | if (last_vsi_end_off > t->size) { |
4593 | btf_verifier_log_vsi(env, t, vsi, |
4594 | "Invalid offset+size"); |
4595 | return -EINVAL; |
4596 | } |
4597 | |
4598 | btf_verifier_log_vsi(env, t, vsi, NULL); |
4599 | sum += vsi->size; |
4600 | } |
4601 | |
4602 | if (t->size < sum) { |
4603 | btf_verifier_log_type(env, t, "Invalid btf_info size" ); |
4604 | return -EINVAL; |
4605 | } |
4606 | |
4607 | return meta_needed; |
4608 | } |
4609 | |
4610 | static int btf_datasec_resolve(struct btf_verifier_env *env, |
4611 | const struct resolve_vertex *v) |
4612 | { |
4613 | const struct btf_var_secinfo *vsi; |
4614 | struct btf *btf = env->btf; |
4615 | u16 i; |
4616 | |
4617 | env->resolve_mode = RESOLVE_TBD; |
4618 | for_each_vsi_from(i, v->next_member, v->t, vsi) { |
4619 | u32 var_type_id = vsi->type, type_id, type_size = 0; |
4620 | const struct btf_type *var_type = btf_type_by_id(env->btf, |
4621 | var_type_id); |
4622 | if (!var_type || !btf_type_is_var(var_type)) { |
4623 | btf_verifier_log_vsi(env, v->t, vsi, |
4624 | "Not a VAR kind member"); |
4625 | return -EINVAL; |
4626 | } |
4627 | |
4628 | if (!env_type_is_resolve_sink(env, var_type) && |
4629 | !env_type_is_resolved(env, var_type_id)) { |
4630 | env_stack_set_next_member(env, i + 1); |
4631 | return env_stack_push(env, var_type, var_type_id); |
4632 | } |
4633 | |
4634 | type_id = var_type->type; |
4635 | if (!btf_type_id_size(btf, &type_id, &type_size)) { |
4636 | btf_verifier_log_vsi(env, v->t, vsi, "Invalid type"); |
4637 | return -EINVAL; |
4638 | } |
4639 | |
4640 | if (vsi->size < type_size) { |
4641 | btf_verifier_log_vsi(env, v->t, vsi, "Invalid size"); |
4642 | return -EINVAL; |
4643 | } |
4644 | } |
4645 | |
4646 | env_stack_pop_resolved(env, 0, 0); |
4647 | return 0; |
4648 | } |
4649 | |
4650 | static void btf_datasec_log(struct btf_verifier_env *env, |
4651 | const struct btf_type *t) |
4652 | { |
4653 | btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); |
4654 | } |
4655 | |
4656 | static void btf_datasec_show(const struct btf *btf, |
4657 | const struct btf_type *t, u32 type_id, |
4658 | void *data, u8 bits_offset, |
4659 | struct btf_show *show) |
4660 | { |
4661 | const struct btf_var_secinfo *vsi; |
4662 | const struct btf_type *var; |
4663 | u32 i; |
4664 | |
4665 | if (!btf_show_start_type(show, t, type_id, data)) |
4666 | return; |
4667 | |
4668 | btf_show_type_value(show, "section (\"%s\") = {" , |
4669 | __btf_name_by_offset(btf, t->name_off)); |
4670 | for_each_vsi(i, t, vsi) { |
4671 | var = btf_type_by_id(btf, vsi->type); |
4672 | if (i) |
4673 | btf_show(show, ","); |
4674 | btf_type_ops(var)->show(btf, var, vsi->type, |
4675 | data + vsi->offset, bits_offset, show); |
4676 | } |
4677 | btf_show_end_type(show); |
4678 | } |
4679 | |
4680 | static const struct btf_kind_operations datasec_ops = { |
4681 | .check_meta = btf_datasec_check_meta, |
4682 | .resolve = btf_datasec_resolve, |
4683 | .check_member = btf_df_check_member, |
4684 | .check_kflag_member = btf_df_check_kflag_member, |
4685 | .log_details = btf_datasec_log, |
4686 | .show = btf_datasec_show, |
4687 | }; |
4688 | |
4689 | static s32 btf_float_check_meta(struct btf_verifier_env *env, |
4690 | const struct btf_type *t, |
4691 | u32 meta_left) |
4692 | { |
4693 | if (btf_type_vlen(t)) { |
4694 | btf_verifier_log_type(env, t, "vlen != 0" ); |
4695 | return -EINVAL; |
4696 | } |
4697 | |
4698 | if (btf_type_kflag(t)) { |
4699 | btf_verifier_log_type(env, t, "Invalid btf_info kind_flag" ); |
4700 | return -EINVAL; |
4701 | } |
4702 | |
4703 | if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 && |
4704 | t->size != 16) { |
4705 | btf_verifier_log_type(env, t, "Invalid type_size" ); |
4706 | return -EINVAL; |
4707 | } |
4708 | |
4709 | btf_verifier_log_type(env, t, NULL); |
4710 | |
4711 | return 0; |
4712 | } |
4713 | |
4714 | static int btf_float_check_member(struct btf_verifier_env *env, |
4715 | const struct btf_type *struct_type, |
4716 | const struct btf_member *member, |
4717 | const struct btf_type *member_type) |
4718 | { |
4719 | u64 start_offset_bytes; |
4720 | u64 end_offset_bytes; |
4721 | u64 misalign_bits; |
4722 | u64 align_bytes; |
4723 | u64 align_bits; |
4724 | |
4725 | /* Different architectures have different alignment requirements, so |
4726 | * here we check only for the reasonable minimum. This way we ensure |
4727 | * that types after CO-RE can pass the kernel BTF verifier. |
4728 | */ |
4729 | align_bytes = min_t(u64, sizeof(void *), member_type->size); |
4730 | align_bits = align_bytes * BITS_PER_BYTE; |
4731 | div64_u64_rem(member->offset, align_bits, &misalign_bits); |
4732 | if (misalign_bits) { |
4733 | btf_verifier_log_member(env, struct_type, member, |
4734 | "Member is not properly aligned"); |
4735 | return -EINVAL; |
4736 | } |
4737 | |
4738 | start_offset_bytes = member->offset / BITS_PER_BYTE; |
4739 | end_offset_bytes = start_offset_bytes + member_type->size; |
4740 | if (end_offset_bytes > struct_type->size) { |
4741 | btf_verifier_log_member(env, struct_type, member, |
4742 | "Member exceeds struct_size"); |
4743 | return -EINVAL; |
4744 | } |
4745 | |
4746 | return 0; |
4747 | } |
4748 | |
4749 | static void btf_float_log(struct btf_verifier_env *env, |
4750 | const struct btf_type *t) |
4751 | { |
4752 | btf_verifier_log(env, "size=%u", t->size); |
4753 | } |
4754 | |
4755 | static const struct btf_kind_operations float_ops = { |
4756 | .check_meta = btf_float_check_meta, |
4757 | .resolve = btf_df_resolve, |
4758 | .check_member = btf_float_check_member, |
4759 | .check_kflag_member = btf_generic_check_kflag_member, |
4760 | .log_details = btf_float_log, |
4761 | .show = btf_df_show, |
4762 | }; |
4763 | |
4764 | static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env, |
4765 | const struct btf_type *t, |
4766 | u32 meta_left) |
4767 | { |
4768 | const struct btf_decl_tag *tag; |
4769 | u32 meta_needed = sizeof(*tag); |
4770 | s32 component_idx; |
4771 | const char *value; |
4772 | |
4773 | if (meta_left < meta_needed) { |
4774 | btf_verifier_log_basic(env, t, |
4775 | "meta_left:%u meta_needed:%u" , |
4776 | meta_left, meta_needed); |
4777 | return -EINVAL; |
4778 | } |
4779 | |
4780 | value = btf_name_by_offset(env->btf, t->name_off); |
4781 | if (!value || !value[0]) { |
4782 | btf_verifier_log_type(env, t, "Invalid value"); |
4783 | return -EINVAL; |
4784 | } |
4785 | |
4786 | if (btf_type_vlen(t)) { |
4787 | btf_verifier_log_type(env, t, "vlen != 0" ); |
4788 | return -EINVAL; |
4789 | } |
4790 | |
4791 | if (btf_type_kflag(t)) { |
4792 | btf_verifier_log_type(env, t, "Invalid btf_info kind_flag" ); |
4793 | return -EINVAL; |
4794 | } |
4795 | |
4796 | component_idx = btf_type_decl_tag(t)->component_idx; |
4797 | if (component_idx < -1) { |
4798 | btf_verifier_log_type(env, t, "Invalid component_idx" ); |
4799 | return -EINVAL; |
4800 | } |
4801 | |
4802 | btf_verifier_log_type(env, t, NULL); |
4803 | |
4804 | return meta_needed; |
4805 | } |
4806 | |
4807 | static int btf_decl_tag_resolve(struct btf_verifier_env *env, |
4808 | const struct resolve_vertex *v) |
4809 | { |
4810 | const struct btf_type *next_type; |
4811 | const struct btf_type *t = v->t; |
4812 | u32 next_type_id = t->type; |
4813 | struct btf *btf = env->btf; |
4814 | s32 component_idx; |
4815 | u32 vlen; |
4816 | |
4817 | next_type = btf_type_by_id(btf, next_type_id); |
4818 | if (!next_type || !btf_type_is_decl_tag_target(next_type)) { |
4819 | btf_verifier_log_type(env, v->t, "Invalid type_id"); |
4820 | return -EINVAL; |
4821 | } |
4822 | |
4823 | if (!env_type_is_resolve_sink(env, next_type) && |
4824 | !env_type_is_resolved(env, next_type_id)) |
4825 | return env_stack_push(env, next_type, next_type_id); |
4826 | |
4827 | component_idx = btf_type_decl_tag(t)->component_idx; |
4828 | if (component_idx != -1) { |
4829 | if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) { |
4830 | btf_verifier_log_type(env, v->t, "Invalid component_idx"); |
4831 | return -EINVAL; |
4832 | } |
4833 | |
4834 | if (btf_type_is_struct(next_type)) { |
4835 | vlen = btf_type_vlen(next_type); |
4836 | } else { |
4837 | /* next_type should be a function */ |
4838 | next_type = btf_type_by_id(btf, next_type->type); |
4839 | vlen = btf_type_vlen(next_type); |
4840 | } |
4841 | |
4842 | if ((u32)component_idx >= vlen) { |
4843 | btf_verifier_log_type(env, v->t, "Invalid component_idx" ); |
4844 | return -EINVAL; |
4845 | } |
4846 | } |
4847 | |
4848 | env_stack_pop_resolved(env, next_type_id, 0); |
4849 | |
4850 | return 0; |
4851 | } |
4852 | |
4853 | static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t) |
4854 | { |
4855 | btf_verifier_log(env, "type=%u component_idx=%d", t->type, |
4856 | btf_type_decl_tag(t)->component_idx); |
4857 | } |
4858 | |
4859 | static const struct btf_kind_operations decl_tag_ops = { |
4860 | .check_meta = btf_decl_tag_check_meta, |
4861 | .resolve = btf_decl_tag_resolve, |
4862 | .check_member = btf_df_check_member, |
4863 | .check_kflag_member = btf_df_check_kflag_member, |
4864 | .log_details = btf_decl_tag_log, |
4865 | .show = btf_df_show, |
4866 | }; |
4867 | |
4868 | static int btf_func_proto_check(struct btf_verifier_env *env, |
4869 | const struct btf_type *t) |
4870 | { |
4871 | const struct btf_type *ret_type; |
4872 | const struct btf_param *args; |
4873 | const struct btf *btf; |
4874 | u16 nr_args, i; |
4875 | int err; |
4876 | |
4877 | btf = env->btf; |
4878 | args = (const struct btf_param *)(t + 1); |
4879 | nr_args = btf_type_vlen(t); |
4880 | |
4881 | /* Check func return type which could be "void" (t->type == 0) */ |
4882 | if (t->type) { |
4883 | u32 ret_type_id = t->type; |
4884 | |
4885 | ret_type = btf_type_by_id(btf, ret_type_id); |
4886 | if (!ret_type) { |
4887 | btf_verifier_log_type(env, t, "Invalid return type" ); |
4888 | return -EINVAL; |
4889 | } |
4890 | |
4891 | if (btf_type_is_resolve_source_only(ret_type)) { |
4892 | btf_verifier_log_type(env, t, "Invalid return type"); |
4893 | return -EINVAL; |
4894 | } |
4895 | |
4896 | if (btf_type_needs_resolve(ret_type) && |
4897 | !env_type_is_resolved(env, ret_type_id)) { |
4898 | err = btf_resolve(env, ret_type, ret_type_id); |
4899 | if (err) |
4900 | return err; |
4901 | } |
4902 | |
4903 | /* Ensure the return type is a type that has a size */ |
4904 | if (!btf_type_id_size(btf, &ret_type_id, NULL)) { |
4905 | btf_verifier_log_type(env, t, "Invalid return type"); |
4906 | return -EINVAL; |
4907 | } |
4908 | } |
4909 | |
4910 | if (!nr_args) |
4911 | return 0; |
4912 | |
4913 | /* Last func arg type_id could be 0 if it is a vararg */ |
4914 | if (!args[nr_args - 1].type) { |
4915 | if (args[nr_args - 1].name_off) { |
4916 | btf_verifier_log_type(env, t, "Invalid arg#%u" , |
4917 | nr_args); |
4918 | return -EINVAL; |
4919 | } |
4920 | nr_args--; |
4921 | } |
4922 | |
4923 | for (i = 0; i < nr_args; i++) { |
4924 | const struct btf_type *arg_type; |
4925 | u32 arg_type_id; |
4926 | |
4927 | arg_type_id = args[i].type; |
4928 | arg_type = btf_type_by_id(btf, arg_type_id); |
4929 | if (!arg_type) { |
4930 | btf_verifier_log_type(env, t, "Invalid arg#%u" , i + 1); |
4931 | return -EINVAL; |
4932 | } |
4933 | |
4934 | if (btf_type_is_resolve_source_only(arg_type)) { |
4935 | btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); |
4936 | return -EINVAL; |
4937 | } |
4938 | |
4939 | if (args[i].name_off && |
4940 | (!btf_name_offset_valid(btf, args[i].name_off) || |
4941 | !btf_name_valid_identifier(btf, args[i].name_off))) { |
4942 | btf_verifier_log_type(env, t, |
4943 | "Invalid arg#%u" , i + 1); |
4944 | return -EINVAL; |
4945 | } |
4946 | |
4947 | if (btf_type_needs_resolve(arg_type) && |
4948 | !env_type_is_resolved(env, arg_type_id)) { |
4949 | err = btf_resolve(env, arg_type, arg_type_id); |
4950 | if (err) |
4951 | return err; |
4952 | } |
4953 | |
4954 | if (!btf_type_id_size(btf, &arg_type_id, NULL)) { |
4955 | btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); |
4956 | return -EINVAL; |
4957 | } |
4958 | } |
4959 | |
4960 | return 0; |
4961 | } |
4962 | |
4963 | static int btf_func_check(struct btf_verifier_env *env, |
4964 | const struct btf_type *t) |
4965 | { |
4966 | const struct btf_type *proto_type; |
4967 | const struct btf_param *args; |
4968 | const struct btf *btf; |
4969 | u16 nr_args, i; |
4970 | |
4971 | btf = env->btf; |
4972 | proto_type = btf_type_by_id(btf, t->type); |
4973 | |
4974 | if (!proto_type || !btf_type_is_func_proto(proto_type)) { |
4975 | btf_verifier_log_type(env, t, "Invalid type_id"); |
4976 | return -EINVAL; |
4977 | } |
4978 | |
4979 | args = (const struct btf_param *)(proto_type + 1); |
4980 | nr_args = btf_type_vlen(proto_type); |
4981 | for (i = 0; i < nr_args; i++) { |
4982 | if (!args[i].name_off && args[i].type) { |
4983 | btf_verifier_log_type(env, t, "Invalid arg#%u" , i + 1); |
4984 | return -EINVAL; |
4985 | } |
4986 | } |
4987 | |
4988 | return 0; |
4989 | } |
4990 | |
4991 | static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = { |
4992 | [BTF_KIND_INT] = &int_ops, |
4993 | [BTF_KIND_PTR] = &ptr_ops, |
4994 | [BTF_KIND_ARRAY] = &array_ops, |
4995 | [BTF_KIND_STRUCT] = &struct_ops, |
4996 | [BTF_KIND_UNION] = &struct_ops, |
4997 | [BTF_KIND_ENUM] = &enum_ops, |
4998 | [BTF_KIND_FWD] = &fwd_ops, |
4999 | [BTF_KIND_TYPEDEF] = &modifier_ops, |
5000 | [BTF_KIND_VOLATILE] = &modifier_ops, |
5001 | [BTF_KIND_CONST] = &modifier_ops, |
5002 | [BTF_KIND_RESTRICT] = &modifier_ops, |
5003 | [BTF_KIND_FUNC] = &func_ops, |
5004 | [BTF_KIND_FUNC_PROTO] = &func_proto_ops, |
5005 | [BTF_KIND_VAR] = &var_ops, |
5006 | [BTF_KIND_DATASEC] = &datasec_ops, |
5007 | [BTF_KIND_FLOAT] = &float_ops, |
5008 | [BTF_KIND_DECL_TAG] = &decl_tag_ops, |
5009 | [BTF_KIND_TYPE_TAG] = &modifier_ops, |
5010 | [BTF_KIND_ENUM64] = &enum64_ops, |
5011 | }; |
5012 | |
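/* Validate the fixed part of one btf_type (info bits, kind, name offset) and
 * then hand off to the kind-specific ->check_meta() from kind_ops[] above for
 * the variable-length tail. Returns the total number of bytes consumed so
 * btf_check_all_metas() can advance to the next type.
 */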
5013 | static s32 btf_check_meta(struct btf_verifier_env *env, |
5014 | const struct btf_type *t, |
5015 | u32 meta_left) |
5016 | { |
5017 | u32 saved_meta_left = meta_left; |
5018 | s32 var_meta_size; |
5019 | |
5020 | if (meta_left < sizeof(*t)) { |
		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
5022 | env->log_type_id, meta_left, sizeof(*t)); |
5023 | return -EINVAL; |
5024 | } |
5025 | meta_left -= sizeof(*t); |
5026 | |
5027 | if (t->info & ~BTF_INFO_MASK) { |
		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
5029 | env->log_type_id, t->info); |
5030 | return -EINVAL; |
5031 | } |
5032 | |
5033 | if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX || |
5034 | BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) { |
		btf_verifier_log(env, "[%u] Invalid kind:%u",
5036 | env->log_type_id, BTF_INFO_KIND(t->info)); |
5037 | return -EINVAL; |
5038 | } |
5039 | |
	if (!btf_name_offset_valid(env->btf, t->name_off)) {
		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
5042 | env->log_type_id, t->name_off); |
5043 | return -EINVAL; |
5044 | } |
5045 | |
5046 | var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left); |
5047 | if (var_meta_size < 0) |
5048 | return var_meta_size; |
5049 | |
5050 | meta_left -= var_meta_size; |
5051 | |
5052 | return saved_meta_left - meta_left; |
5053 | } |
5054 | |
5055 | static int btf_check_all_metas(struct btf_verifier_env *env) |
5056 | { |
5057 | struct btf *btf = env->btf; |
5058 | struct btf_header *hdr; |
5059 | void *cur, *end; |
5060 | |
5061 | hdr = &btf->hdr; |
5062 | cur = btf->nohdr_data + hdr->type_off; |
5063 | end = cur + hdr->type_len; |
5064 | |
5065 | env->log_type_id = btf->base_btf ? btf->start_id : 1; |
5066 | while (cur < end) { |
5067 | struct btf_type *t = cur; |
5068 | s32 meta_size; |
5069 | |
		meta_size = btf_check_meta(env, t, end - cur);
5071 | if (meta_size < 0) |
5072 | return meta_size; |
5073 | |
5074 | btf_add_type(env, t); |
5075 | cur += meta_size; |
5076 | env->log_type_id++; |
5077 | } |
5078 | |
5079 | return 0; |
5080 | } |
5081 | |
5082 | static bool btf_resolve_valid(struct btf_verifier_env *env, |
5083 | const struct btf_type *t, |
5084 | u32 type_id) |
5085 | { |
5086 | struct btf *btf = env->btf; |
5087 | |
5088 | if (!env_type_is_resolved(env, type_id)) |
5089 | return false; |
5090 | |
5091 | if (btf_type_is_struct(t) || btf_type_is_datasec(t)) |
5092 | return !btf_resolved_type_id(btf, type_id) && |
5093 | !btf_resolved_type_size(btf, type_id); |
5094 | |
5095 | if (btf_type_is_decl_tag(t) || btf_type_is_func(t)) |
5096 | return btf_resolved_type_id(btf, type_id) && |
5097 | !btf_resolved_type_size(btf, type_id); |
5098 | |
5099 | if (btf_type_is_modifier(t) || btf_type_is_ptr(t) || |
5100 | btf_type_is_var(t)) { |
		t = btf_type_id_resolve(btf, &type_id);
5102 | return t && |
5103 | !btf_type_is_modifier(t) && |
5104 | !btf_type_is_var(t) && |
5105 | !btf_type_is_datasec(t); |
5106 | } |
5107 | |
5108 | if (btf_type_is_array(t)) { |
5109 | const struct btf_array *array = btf_type_array(t); |
5110 | const struct btf_type *elem_type; |
5111 | u32 elem_type_id = array->type; |
5112 | u32 elem_size; |
5113 | |
		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
		return elem_type && !btf_type_is_modifier(elem_type) &&
5116 | (array->nelems * elem_size == |
5117 | btf_resolved_type_size(btf, type_id)); |
5118 | } |
5119 | |
5120 | return false; |
5121 | } |
5122 | |
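/* Resolve one type graph iteratively: instead of recursing, push work onto
 * the env resolve stack and keep popping until it drains. A chain deeper
 * than MAX_RESOLVE_DEPTH surfaces as -E2BIG ("Exceeded max resolving depth")
 * and a cycle in the type graph surfaces as -EEXIST ("Loop detected").
 */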
5123 | static int btf_resolve(struct btf_verifier_env *env, |
5124 | const struct btf_type *t, u32 type_id) |
5125 | { |
5126 | u32 save_log_type_id = env->log_type_id; |
5127 | const struct resolve_vertex *v; |
5128 | int err = 0; |
5129 | |
5130 | env->resolve_mode = RESOLVE_TBD; |
5131 | env_stack_push(env, t, type_id); |
5132 | while (!err && (v = env_stack_peak(env))) { |
5133 | env->log_type_id = v->type_id; |
		err = btf_type_ops(v->t)->resolve(env, v);
5135 | } |
5136 | |
5137 | env->log_type_id = type_id; |
5138 | if (err == -E2BIG) { |
5139 | btf_verifier_log_type(env, t, |
5140 | "Exceeded max resolving depth:%u" , |
5141 | MAX_RESOLVE_DEPTH); |
5142 | } else if (err == -EEXIST) { |
5143 | btf_verifier_log_type(env, t, "Loop detected" ); |
5144 | } |
5145 | |
5146 | /* Final sanity check */ |
5147 | if (!err && !btf_resolve_valid(env, t, type_id)) { |
5148 | btf_verifier_log_type(env, t, "Invalid resolve state" ); |
5149 | err = -EINVAL; |
5150 | } |
5151 | |
5152 | env->log_type_id = save_log_type_id; |
5153 | return err; |
5154 | } |
5155 | |
5156 | static int btf_check_all_types(struct btf_verifier_env *env) |
5157 | { |
5158 | struct btf *btf = env->btf; |
5159 | const struct btf_type *t; |
5160 | u32 type_id, i; |
5161 | int err; |
5162 | |
5163 | err = env_resolve_init(env); |
5164 | if (err) |
5165 | return err; |
5166 | |
5167 | env->phase++; |
5168 | for (i = btf->base_btf ? 0 : 1; i < btf->nr_types; i++) { |
5169 | type_id = btf->start_id + i; |
5170 | t = btf_type_by_id(btf, type_id); |
5171 | |
5172 | env->log_type_id = type_id; |
5173 | if (btf_type_needs_resolve(t) && |
5174 | !env_type_is_resolved(env, type_id)) { |
5175 | err = btf_resolve(env, t, type_id); |
5176 | if (err) |
5177 | return err; |
5178 | } |
5179 | |
5180 | if (btf_type_is_func_proto(t)) { |
5181 | err = btf_func_proto_check(env, t); |
5182 | if (err) |
5183 | return err; |
5184 | } |
5185 | } |
5186 | |
5187 | return 0; |
5188 | } |
5189 | |
5190 | static int btf_parse_type_sec(struct btf_verifier_env *env) |
5191 | { |
5192 | const struct btf_header *hdr = &env->btf->hdr; |
5193 | int err; |
5194 | |
5195 | /* Type section must align to 4 bytes */ |
5196 | if (hdr->type_off & (sizeof(u32) - 1)) { |
5197 | btf_verifier_log(env, fmt: "Unaligned type_off" ); |
5198 | return -EINVAL; |
5199 | } |
5200 | |
5201 | if (!env->btf->base_btf && !hdr->type_len) { |
5202 | btf_verifier_log(env, fmt: "No type found" ); |
5203 | return -EINVAL; |
5204 | } |
5205 | |
5206 | err = btf_check_all_metas(env); |
5207 | if (err) |
5208 | return err; |
5209 | |
5210 | return btf_check_all_types(env); |
5211 | } |
5212 | |
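/* The string section is a single blob of '\0'-terminated names that every
 * name_off in the type section indexes into, e.g. "\0int\0value\0next"
 * (illustrative layout). The checks below require it to be the last section
 * in the BTF data, to start with '\0' (unless this is split BTF on top of a
 * base), to end with '\0' and to stay within BTF_MAX_NAME_OFFSET.
 */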
5213 | static int btf_parse_str_sec(struct btf_verifier_env *env) |
5214 | { |
5215 | const struct btf_header *hdr; |
5216 | struct btf *btf = env->btf; |
5217 | const char *start, *end; |
5218 | |
5219 | hdr = &btf->hdr; |
5220 | start = btf->nohdr_data + hdr->str_off; |
5221 | end = start + hdr->str_len; |
5222 | |
5223 | if (end != btf->data + btf->data_size) { |
5224 | btf_verifier_log(env, fmt: "String section is not at the end" ); |
5225 | return -EINVAL; |
5226 | } |
5227 | |
5228 | btf->strings = start; |
5229 | |
5230 | if (btf->base_btf && !hdr->str_len) |
5231 | return 0; |
5232 | if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) { |
5233 | btf_verifier_log(env, fmt: "Invalid string section" ); |
5234 | return -EINVAL; |
5235 | } |
5236 | if (!btf->base_btf && start[0]) { |
5237 | btf_verifier_log(env, fmt: "Invalid string section" ); |
5238 | return -EINVAL; |
5239 | } |
5240 | |
5241 | return 0; |
5242 | } |
5243 | |
5244 | static const size_t btf_sec_info_offset[] = { |
5245 | offsetof(struct btf_header, type_off), |
5246 | offsetof(struct btf_header, str_off), |
5247 | }; |
5248 | |
5249 | static int btf_sec_info_cmp(const void *a, const void *b) |
5250 | { |
5251 | const struct btf_sec_info *x = a; |
5252 | const struct btf_sec_info *y = b; |
5253 | |
5254 | return (int)(x->off - y->off) ? : (int)(x->len - y->len); |
5255 | } |
5256 | |
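/* After the header, the type and string sections must tile the remaining BTF
 * data exactly (roughly: | btf_header | type section | string section |, with
 * btf_parse_str_sec() separately pinning strings to the end). The sections
 * are sorted by offset and then checked pairwise for gaps, overlaps and data
 * running past the buffer; any leftover bytes are rejected as an unsupported
 * section.
 */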
5257 | static int btf_check_sec_info(struct btf_verifier_env *env, |
5258 | u32 btf_data_size) |
5259 | { |
5260 | struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)]; |
5261 | u32 total, expected_total, i; |
5262 | const struct btf_header *hdr; |
5263 | const struct btf *btf; |
5264 | |
5265 | btf = env->btf; |
5266 | hdr = &btf->hdr; |
5267 | |
5268 | /* Populate the secs from hdr */ |
5269 | for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) |
5270 | secs[i] = *(struct btf_sec_info *)((void *)hdr + |
5271 | btf_sec_info_offset[i]); |
5272 | |
	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
5275 | |
5276 | /* Check for gaps and overlap among sections */ |
5277 | total = 0; |
5278 | expected_total = btf_data_size - hdr->hdr_len; |
5279 | for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) { |
5280 | if (expected_total < secs[i].off) { |
5281 | btf_verifier_log(env, fmt: "Invalid section offset" ); |
5282 | return -EINVAL; |
5283 | } |
5284 | if (total < secs[i].off) { |
5285 | /* gap */ |
5286 | btf_verifier_log(env, fmt: "Unsupported section found" ); |
5287 | return -EINVAL; |
5288 | } |
5289 | if (total > secs[i].off) { |
5290 | btf_verifier_log(env, fmt: "Section overlap found" ); |
5291 | return -EINVAL; |
5292 | } |
5293 | if (expected_total - total < secs[i].len) { |
			btf_verifier_log(env,
					 "Total section length too long");
5296 | return -EINVAL; |
5297 | } |
5298 | total += secs[i].len; |
5299 | } |
5300 | |
5301 | /* There is data other than hdr and known sections */ |
5302 | if (expected_total != total) { |
5303 | btf_verifier_log(env, fmt: "Unsupported section found" ); |
5304 | return -EINVAL; |
5305 | } |
5306 | |
5307 | return 0; |
5308 | } |
5309 | |
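/* Header parsing is hdr_len based for forward/backward compatibility: a
 * header longer than what this kernel knows about is accepted only if all of
 * the extra bytes are zero, and only the known prefix is copied into
 * btf->hdr. Magic, version and flags are then validated before the section
 * layout check.
 */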
5310 | static int btf_parse_hdr(struct btf_verifier_env *env) |
5311 | { |
5312 | u32 hdr_len, hdr_copy, btf_data_size; |
5313 | const struct btf_header *hdr; |
5314 | struct btf *btf; |
5315 | |
5316 | btf = env->btf; |
5317 | btf_data_size = btf->data_size; |
5318 | |
5319 | if (btf_data_size < offsetofend(struct btf_header, hdr_len)) { |
5320 | btf_verifier_log(env, fmt: "hdr_len not found" ); |
5321 | return -EINVAL; |
5322 | } |
5323 | |
5324 | hdr = btf->data; |
5325 | hdr_len = hdr->hdr_len; |
5326 | if (btf_data_size < hdr_len) { |
5327 | btf_verifier_log(env, fmt: "btf_header not found" ); |
5328 | return -EINVAL; |
5329 | } |
5330 | |
5331 | /* Ensure the unsupported header fields are zero */ |
5332 | if (hdr_len > sizeof(btf->hdr)) { |
5333 | u8 *expected_zero = btf->data + sizeof(btf->hdr); |
5334 | u8 *end = btf->data + hdr_len; |
5335 | |
5336 | for (; expected_zero < end; expected_zero++) { |
5337 | if (*expected_zero) { |
5338 | btf_verifier_log(env, fmt: "Unsupported btf_header" ); |
5339 | return -E2BIG; |
5340 | } |
5341 | } |
5342 | } |
5343 | |
5344 | hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr)); |
5345 | memcpy(&btf->hdr, btf->data, hdr_copy); |
5346 | |
5347 | hdr = &btf->hdr; |
5348 | |
5349 | btf_verifier_log_hdr(env, btf_data_size); |
5350 | |
5351 | if (hdr->magic != BTF_MAGIC) { |
5352 | btf_verifier_log(env, fmt: "Invalid magic" ); |
5353 | return -EINVAL; |
5354 | } |
5355 | |
5356 | if (hdr->version != BTF_VERSION) { |
5357 | btf_verifier_log(env, fmt: "Unsupported version" ); |
5358 | return -ENOTSUPP; |
5359 | } |
5360 | |
5361 | if (hdr->flags) { |
5362 | btf_verifier_log(env, fmt: "Unsupported flags" ); |
5363 | return -ENOTSUPP; |
5364 | } |
5365 | |
5366 | if (!btf->base_btf && btf_data_size == hdr->hdr_len) { |
5367 | btf_verifier_log(env, fmt: "No data" ); |
5368 | return -EINVAL; |
5369 | } |
5370 | |
5371 | return btf_check_sec_info(env, btf_data_size); |
5372 | } |
5373 | |
5374 | static const char *alloc_obj_fields[] = { |
5375 | "bpf_spin_lock" , |
5376 | "bpf_list_head" , |
5377 | "bpf_list_node" , |
5378 | "bpf_rb_root" , |
5379 | "bpf_rb_node" , |
5380 | "bpf_refcount" , |
5381 | }; |
5382 | |
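/* Scan all structs in this BTF for the special object field types listed in
 * alloc_obj_fields[] above. Every struct that embeds at least one of them
 * gets a btf_struct_meta entry (its btf_id plus the parsed btf_record of
 * special fields). Entries are appended in ascending btf_id order, which is
 * what lets btf_find_struct_meta() below bsearch() the table at verification
 * time.
 */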
5383 | static struct btf_struct_metas * |
5384 | btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf) |
5385 | { |
5386 | union { |
5387 | struct btf_id_set set; |
5388 | struct { |
5389 | u32 _cnt; |
5390 | u32 _ids[ARRAY_SIZE(alloc_obj_fields)]; |
5391 | } _arr; |
5392 | } aof; |
5393 | struct btf_struct_metas *tab = NULL; |
5394 | int i, n, id, ret; |
5395 | |
5396 | BUILD_BUG_ON(offsetof(struct btf_id_set, cnt) != 0); |
5397 | BUILD_BUG_ON(sizeof(struct btf_id_set) != sizeof(u32)); |
5398 | |
5399 | memset(&aof, 0, sizeof(aof)); |
5400 | for (i = 0; i < ARRAY_SIZE(alloc_obj_fields); i++) { |
5401 | /* Try to find whether this special type exists in user BTF, and |
5402 | * if so remember its ID so we can easily find it among members |
5403 | * of structs that we iterate in the next loop. |
5404 | */ |
		id = btf_find_by_name_kind(btf, alloc_obj_fields[i], BTF_KIND_STRUCT);
5406 | if (id < 0) |
5407 | continue; |
5408 | aof.set.ids[aof.set.cnt++] = id; |
5409 | } |
5410 | |
5411 | if (!aof.set.cnt) |
5412 | return NULL; |
	sort(&aof.set.ids, aof.set.cnt, sizeof(aof.set.ids[0]), btf_id_cmp_func, NULL);
5414 | |
5415 | n = btf_nr_types(btf); |
5416 | for (i = 1; i < n; i++) { |
5417 | struct btf_struct_metas *new_tab; |
5418 | const struct btf_member *member; |
5419 | struct btf_struct_meta *type; |
5420 | struct btf_record *record; |
5421 | const struct btf_type *t; |
5422 | int j, tab_cnt; |
5423 | |
5424 | t = btf_type_by_id(btf, i); |
5425 | if (!t) { |
5426 | ret = -EINVAL; |
5427 | goto free; |
5428 | } |
5429 | if (!__btf_type_is_struct(t)) |
5430 | continue; |
5431 | |
5432 | cond_resched(); |
5433 | |
5434 | for_each_member(j, t, member) { |
			if (btf_id_set_contains(&aof.set, member->type))
5436 | goto parse; |
5437 | } |
5438 | continue; |
5439 | parse: |
5440 | tab_cnt = tab ? tab->cnt : 0; |
		new_tab = krealloc(tab, offsetof(struct btf_struct_metas, types[tab_cnt + 1]),
5442 | GFP_KERNEL | __GFP_NOWARN); |
5443 | if (!new_tab) { |
5444 | ret = -ENOMEM; |
5445 | goto free; |
5446 | } |
5447 | if (!tab) |
5448 | new_tab->cnt = 0; |
5449 | tab = new_tab; |
5450 | |
5451 | type = &tab->types[tab->cnt]; |
5452 | type->btf_id = i; |
		record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE |
						  BPF_RB_ROOT | BPF_RB_NODE | BPF_REFCOUNT, t->size);
		/* The record cannot be unset, treat it as an error if so */
		if (IS_ERR_OR_NULL(record)) {
			ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT;
5458 | goto free; |
5459 | } |
5460 | type->record = record; |
5461 | tab->cnt++; |
5462 | } |
5463 | return tab; |
5464 | free: |
5465 | btf_struct_metas_free(tab); |
	return ERR_PTR(ret);
5467 | } |
5468 | |
5469 | struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id) |
5470 | { |
5471 | struct btf_struct_metas *tab; |
5472 | |
5473 | BUILD_BUG_ON(offsetof(struct btf_struct_meta, btf_id) != 0); |
5474 | tab = btf->struct_meta_tab; |
5475 | if (!tab) |
5476 | return NULL; |
	return bsearch(&btf_id, tab->types, tab->cnt, sizeof(tab->types[0]), btf_id_cmp_func);
5478 | } |
5479 | |
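/* Within any chain of modifiers, BTF_KIND_TYPE_TAGs must all come before the
 * other modifiers, e.g. TYPE_TAG("user") -> CONST -> INT is accepted while
 * CONST -> TYPE_TAG("user") -> INT is rejected (names here are illustrative).
 * chain_limit bounds the walk so a malformed, cyclic chain cannot loop
 * forever, and good_id lets already verified suffixes be skipped.
 */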
5480 | static int btf_check_type_tags(struct btf_verifier_env *env, |
5481 | struct btf *btf, int start_id) |
5482 | { |
5483 | int i, n, good_id = start_id - 1; |
5484 | bool in_tags; |
5485 | |
5486 | n = btf_nr_types(btf); |
5487 | for (i = start_id; i < n; i++) { |
5488 | const struct btf_type *t; |
5489 | int chain_limit = 32; |
5490 | u32 cur_id = i; |
5491 | |
5492 | t = btf_type_by_id(btf, i); |
5493 | if (!t) |
5494 | return -EINVAL; |
5495 | if (!btf_type_is_modifier(t)) |
5496 | continue; |
5497 | |
5498 | cond_resched(); |
5499 | |
5500 | in_tags = btf_type_is_type_tag(t); |
5501 | while (btf_type_is_modifier(t)) { |
5502 | if (!chain_limit--) { |
5503 | btf_verifier_log(env, fmt: "Max chain length or cycle detected" ); |
5504 | return -ELOOP; |
5505 | } |
5506 | if (btf_type_is_type_tag(t)) { |
5507 | if (!in_tags) { |
5508 | btf_verifier_log(env, fmt: "Type tags don't precede modifiers" ); |
5509 | return -EINVAL; |
5510 | } |
5511 | } else if (in_tags) { |
5512 | in_tags = false; |
5513 | } |
5514 | if (cur_id <= good_id) |
5515 | break; |
5516 | /* Move to next type */ |
5517 | cur_id = t->type; |
5518 | t = btf_type_by_id(btf, cur_id); |
5519 | if (!t) |
5520 | return -EINVAL; |
5521 | } |
5522 | good_id = i; |
5523 | } |
5524 | return 0; |
5525 | } |
5526 | |
5527 | static int finalize_log(struct bpf_verifier_log *log, bpfptr_t uattr, u32 uattr_size) |
5528 | { |
5529 | u32 log_true_size; |
5530 | int err; |
5531 | |
	err = bpf_vlog_finalize(log, &log_true_size);
5533 | |
5534 | if (uattr_size >= offsetofend(union bpf_attr, btf_log_true_size) && |
	    copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, btf_log_true_size),
				  &log_true_size, sizeof(log_true_size)))
5537 | err = -EFAULT; |
5538 | |
5539 | return err; |
5540 | } |
5541 | |
5542 | static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) |
5543 | { |
	bpfptr_t btf_data = make_bpfptr(attr->btf, uattr.is_kernel);
5545 | char __user *log_ubuf = u64_to_user_ptr(attr->btf_log_buf); |
5546 | struct btf_struct_metas *struct_meta_tab; |
5547 | struct btf_verifier_env *env = NULL; |
5548 | struct btf *btf = NULL; |
5549 | u8 *data; |
5550 | int err, ret; |
5551 | |
5552 | if (attr->btf_size > BTF_MAX_SIZE) |
		return ERR_PTR(-E2BIG);

	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
	if (!env)
		return ERR_PTR(-ENOMEM);
5558 | |
5559 | /* user could have requested verbose verifier output |
5560 | * and supplied buffer to store the verification trace |
5561 | */ |
	err = bpf_vlog_init(&env->log, attr->btf_log_level,
			    log_ubuf, attr->btf_log_size);
	if (err)
		goto errout_free;

	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
5568 | if (!btf) { |
5569 | err = -ENOMEM; |
5570 | goto errout; |
5571 | } |
5572 | env->btf = btf; |
5573 | |
	data = kvmalloc(attr->btf_size, GFP_KERNEL | __GFP_NOWARN);
5575 | if (!data) { |
5576 | err = -ENOMEM; |
5577 | goto errout; |
5578 | } |
5579 | |
5580 | btf->data = data; |
5581 | btf->data_size = attr->btf_size; |
5582 | |
	if (copy_from_bpfptr(data, btf_data, attr->btf_size)) {
5584 | err = -EFAULT; |
5585 | goto errout; |
5586 | } |
5587 | |
5588 | err = btf_parse_hdr(env); |
5589 | if (err) |
5590 | goto errout; |
5591 | |
5592 | btf->nohdr_data = btf->data + btf->hdr.hdr_len; |
5593 | |
5594 | err = btf_parse_str_sec(env); |
5595 | if (err) |
5596 | goto errout; |
5597 | |
5598 | err = btf_parse_type_sec(env); |
5599 | if (err) |
5600 | goto errout; |
5601 | |
	err = btf_check_type_tags(env, btf, 1);
	if (err)
		goto errout;

	struct_meta_tab = btf_parse_struct_metas(&env->log, btf);
	if (IS_ERR(struct_meta_tab)) {
		err = PTR_ERR(struct_meta_tab);
5609 | goto errout; |
5610 | } |
5611 | btf->struct_meta_tab = struct_meta_tab; |
5612 | |
5613 | if (struct_meta_tab) { |
5614 | int i; |
5615 | |
5616 | for (i = 0; i < struct_meta_tab->cnt; i++) { |
			err = btf_check_and_fixup_fields(btf, struct_meta_tab->types[i].record);
5618 | if (err < 0) |
5619 | goto errout_meta; |
5620 | } |
5621 | } |
5622 | |
	err = finalize_log(&env->log, uattr, uattr_size);
	if (err)
		goto errout_free;

	btf_verifier_env_free(env);
	refcount_set(&btf->refcnt, 1);
5629 | return btf; |
5630 | |
5631 | errout_meta: |
5632 | btf_free_struct_meta_tab(btf); |
5633 | errout: |
5634 | /* overwrite err with -ENOSPC or -EFAULT */ |
	ret = finalize_log(&env->log, uattr, uattr_size);
	if (ret)
		err = ret;
errout_free:
	btf_verifier_env_free(env);
	if (btf)
		btf_free(btf);
	return ERR_PTR(err);
5643 | } |
5644 | |
5645 | extern char __weak __start_BTF[]; |
5646 | extern char __weak __stop_BTF[]; |
5647 | extern struct btf *btf_vmlinux; |
5648 | |
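/* Build a compile-time table of context types: for every BPF_PROG_TYPE()
 * listed in linux/bpf_types.h, struct bpf_ctx_convert below gets a pair of
 * members - the prog-facing ctx type (e.g. struct __sk_buff) followed by the
 * in-kernel ctx type (e.g. struct sk_buff). BPF_MAP_TYPE()/BPF_LINK_TYPE()
 * are defined empty so only the prog type entries expand, and
 * bpf_ctx_convert_map[] translates an enum bpf_prog_type into the index of
 * its pair.
 */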
5649 | #define BPF_MAP_TYPE(_id, _ops) |
5650 | #define BPF_LINK_TYPE(_id, _name) |
5651 | static union { |
5652 | struct bpf_ctx_convert { |
5653 | #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ |
5654 | prog_ctx_type _id##_prog; \ |
5655 | kern_ctx_type _id##_kern; |
5656 | #include <linux/bpf_types.h> |
5657 | #undef BPF_PROG_TYPE |
5658 | } *__t; |
5659 | /* 't' is written once under lock. Read many times. */ |
5660 | const struct btf_type *t; |
5661 | } bpf_ctx_convert; |
5662 | enum { |
5663 | #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ |
5664 | __ctx_convert##_id, |
5665 | #include <linux/bpf_types.h> |
5666 | #undef BPF_PROG_TYPE |
5667 | __ctx_convert_unused, /* to avoid empty enum in extreme .config */ |
5668 | }; |
5669 | static u8 bpf_ctx_convert_map[] = { |
5670 | #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ |
5671 | [_id] = __ctx_convert##_id, |
5672 | #include <linux/bpf_types.h> |
5673 | #undef BPF_PROG_TYPE |
5674 | 0, /* avoid empty array */ |
5675 | }; |
5676 | #undef BPF_MAP_TYPE |
5677 | #undef BPF_LINK_TYPE |
5678 | |
5679 | static const struct btf_type *find_canonical_prog_ctx_type(enum bpf_prog_type prog_type) |
5680 | { |
5681 | const struct btf_type *conv_struct; |
5682 | const struct btf_member *ctx_type; |
5683 | |
5684 | conv_struct = bpf_ctx_convert.t; |
5685 | if (!conv_struct) |
5686 | return NULL; |
5687 | /* prog_type is valid bpf program type. No need for bounds check. */ |
	ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
5689 | /* ctx_type is a pointer to prog_ctx_type in vmlinux. |
5690 | * Like 'struct __sk_buff' |
5691 | */ |
5692 | return btf_type_by_id(btf_vmlinux, ctx_type->type); |
5693 | } |
5694 | |
5695 | static int find_kern_ctx_type_id(enum bpf_prog_type prog_type) |
5696 | { |
5697 | const struct btf_type *conv_struct; |
5698 | const struct btf_member *ctx_type; |
5699 | |
5700 | conv_struct = bpf_ctx_convert.t; |
5701 | if (!conv_struct) |
5702 | return -EFAULT; |
5703 | /* prog_type is valid bpf program type. No need for bounds check. */ |
	ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2 + 1;
	/* ctx_type is a pointer to kern_ctx_type in vmlinux.
	 * Like 'struct sk_buff'
	 */
5708 | return ctx_type->type; |
5709 | } |
5710 | |
5711 | bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf, |
5712 | const struct btf_type *t, enum bpf_prog_type prog_type, |
5713 | int arg) |
5714 | { |
5715 | const struct btf_type *ctx_type; |
5716 | const char *tname, *ctx_tname; |
5717 | |
5718 | t = btf_type_by_id(btf, t->type); |
5719 | |
5720 | /* KPROBE programs allow bpf_user_pt_regs_t typedef, which we need to |
5721 | * check before we skip all the typedef below. |
5722 | */ |
5723 | if (prog_type == BPF_PROG_TYPE_KPROBE) { |
5724 | while (btf_type_is_modifier(t) && !btf_type_is_typedef(t)) |
5725 | t = btf_type_by_id(btf, t->type); |
5726 | |
5727 | if (btf_type_is_typedef(t)) { |
			tname = btf_name_by_offset(btf, t->name_off);
			if (tname && strcmp(tname, "bpf_user_pt_regs_t") == 0)
5730 | return true; |
5731 | } |
5732 | } |
5733 | |
5734 | while (btf_type_is_modifier(t)) |
5735 | t = btf_type_by_id(btf, t->type); |
5736 | if (!btf_type_is_struct(t)) { |
5737 | /* Only pointer to struct is supported for now. |
5738 | * That means that BPF_PROG_TYPE_TRACEPOINT with BTF |
5739 | * is not supported yet. |
5740 | * BPF_PROG_TYPE_RAW_TRACEPOINT is fine. |
5741 | */ |
5742 | return false; |
5743 | } |
	tname = btf_name_by_offset(btf, t->name_off);
	if (!tname) {
		bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
5747 | return false; |
5748 | } |
5749 | |
5750 | ctx_type = find_canonical_prog_ctx_type(prog_type); |
5751 | if (!ctx_type) { |
5752 | bpf_log(log, fmt: "btf_vmlinux is malformed\n" ); |
5753 | /* should not happen */ |
5754 | return false; |
5755 | } |
5756 | again: |
	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
	if (!ctx_tname) {
		/* should not happen */
		bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
5761 | return false; |
5762 | } |
5763 | /* program types without named context types work only with arg:ctx tag */ |
5764 | if (ctx_tname[0] == '\0') |
5765 | return false; |
5766 | /* only compare that prog's ctx type name is the same as |
5767 | * kernel expects. No need to compare field by field. |
5768 | * It's ok for bpf prog to do: |
5769 | * struct __sk_buff {}; |
5770 | * int socket_filter_bpf_prog(struct __sk_buff *skb) |
5771 | * { // no fields of skb are ever used } |
5772 | */ |
5773 | if (strcmp(ctx_tname, "__sk_buff" ) == 0 && strcmp(tname, "sk_buff" ) == 0) |
5774 | return true; |
5775 | if (strcmp(ctx_tname, "xdp_md" ) == 0 && strcmp(tname, "xdp_buff" ) == 0) |
5776 | return true; |
5777 | if (strcmp(ctx_tname, tname)) { |
5778 | /* bpf_user_pt_regs_t is a typedef, so resolve it to |
5779 | * underlying struct and check name again |
5780 | */ |
		if (!btf_type_is_modifier(ctx_type))
			return false;
		while (btf_type_is_modifier(ctx_type))
5784 | ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type); |
5785 | goto again; |
5786 | } |
5787 | return true; |
5788 | } |
5789 | |
5790 | /* forward declarations for arch-specific underlying types of |
5791 | * bpf_user_pt_regs_t; this avoids the need for arch-specific #ifdef |
5792 | * compilation guards below for BPF_PROG_TYPE_PERF_EVENT checks, but still |
5793 | * works correctly with __builtin_types_compatible_p() on respective |
5794 | * architectures |
5795 | */ |
5796 | struct user_regs_struct; |
5797 | struct user_pt_regs; |
5798 | |
5799 | static int btf_validate_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf, |
5800 | const struct btf_type *t, int arg, |
5801 | enum bpf_prog_type prog_type, |
5802 | enum bpf_attach_type attach_type) |
5803 | { |
5804 | const struct btf_type *ctx_type; |
5805 | const char *tname, *ctx_tname; |
5806 | |
5807 | if (!btf_is_ptr(t)) { |
5808 | bpf_log(log, fmt: "arg#%d type isn't a pointer\n" , arg); |
5809 | return -EINVAL; |
5810 | } |
5811 | t = btf_type_by_id(btf, t->type); |
5812 | |
5813 | /* KPROBE and PERF_EVENT programs allow bpf_user_pt_regs_t typedef */ |
5814 | if (prog_type == BPF_PROG_TYPE_KPROBE || prog_type == BPF_PROG_TYPE_PERF_EVENT) { |
5815 | while (btf_type_is_modifier(t) && !btf_type_is_typedef(t)) |
5816 | t = btf_type_by_id(btf, t->type); |
5817 | |
5818 | if (btf_type_is_typedef(t)) { |
			tname = btf_name_by_offset(btf, t->name_off);
			if (tname && strcmp(tname, "bpf_user_pt_regs_t") == 0)
5821 | return 0; |
5822 | } |
5823 | } |
5824 | |
5825 | /* all other program types don't use typedefs for context type */ |
5826 | while (btf_type_is_modifier(t)) |
5827 | t = btf_type_by_id(btf, t->type); |
5828 | |
5829 | /* `void *ctx __arg_ctx` is always valid */ |
5830 | if (btf_type_is_void(t)) |
5831 | return 0; |
5832 | |
	tname = btf_name_by_offset(btf, t->name_off);
	if (str_is_empty(tname)) {
		bpf_log(log, "arg#%d type doesn't have a name\n", arg);
5836 | return -EINVAL; |
5837 | } |
5838 | |
5839 | /* special cases */ |
5840 | switch (prog_type) { |
5841 | case BPF_PROG_TYPE_KPROBE: |
5842 | if (__btf_type_is_struct(t) && strcmp(tname, "pt_regs" ) == 0) |
5843 | return 0; |
5844 | break; |
5845 | case BPF_PROG_TYPE_PERF_EVENT: |
5846 | if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) && |
5847 | __btf_type_is_struct(t) && strcmp(tname, "pt_regs" ) == 0) |
5848 | return 0; |
5849 | if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) && |
5850 | __btf_type_is_struct(t) && strcmp(tname, "user_pt_regs" ) == 0) |
5851 | return 0; |
5852 | if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) && |
5853 | __btf_type_is_struct(t) && strcmp(tname, "user_regs_struct" ) == 0) |
5854 | return 0; |
5855 | break; |
5856 | case BPF_PROG_TYPE_RAW_TRACEPOINT: |
5857 | case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: |
5858 | /* allow u64* as ctx */ |
5859 | if (btf_is_int(t) && t->size == 8) |
5860 | return 0; |
5861 | break; |
5862 | case BPF_PROG_TYPE_TRACING: |
5863 | switch (attach_type) { |
5864 | case BPF_TRACE_RAW_TP: |
5865 | /* tp_btf program is TRACING, so need special case here */ |
5866 | if (__btf_type_is_struct(t) && |
5867 | strcmp(tname, "bpf_raw_tracepoint_args" ) == 0) |
5868 | return 0; |
5869 | /* allow u64* as ctx */ |
5870 | if (btf_is_int(t) && t->size == 8) |
5871 | return 0; |
5872 | break; |
5873 | case BPF_TRACE_ITER: |
5874 | /* allow struct bpf_iter__xxx types only */ |
5875 | if (__btf_type_is_struct(t) && |
5876 | strncmp(tname, "bpf_iter__" , sizeof("bpf_iter__" ) - 1) == 0) |
5877 | return 0; |
5878 | break; |
5879 | case BPF_TRACE_FENTRY: |
5880 | case BPF_TRACE_FEXIT: |
5881 | case BPF_MODIFY_RETURN: |
5882 | /* allow u64* as ctx */ |
5883 | if (btf_is_int(t) && t->size == 8) |
5884 | return 0; |
5885 | break; |
5886 | default: |
5887 | break; |
5888 | } |
5889 | break; |
5890 | case BPF_PROG_TYPE_LSM: |
5891 | case BPF_PROG_TYPE_STRUCT_OPS: |
5892 | /* allow u64* as ctx */ |
5893 | if (btf_is_int(t) && t->size == 8) |
5894 | return 0; |
5895 | break; |
5896 | case BPF_PROG_TYPE_TRACEPOINT: |
5897 | case BPF_PROG_TYPE_SYSCALL: |
5898 | case BPF_PROG_TYPE_EXT: |
5899 | return 0; /* anything goes */ |
5900 | default: |
5901 | break; |
5902 | } |
5903 | |
5904 | ctx_type = find_canonical_prog_ctx_type(prog_type); |
5905 | if (!ctx_type) { |
5906 | /* should not happen */ |
5907 | bpf_log(log, fmt: "btf_vmlinux is malformed\n" ); |
5908 | return -EINVAL; |
5909 | } |
5910 | |
5911 | /* resolve typedefs and check that underlying structs are matching as well */ |
5912 | while (btf_type_is_modifier(t: ctx_type)) |
5913 | ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type); |
5914 | |
5915 | /* if program type doesn't have distinctly named struct type for |
5916 | * context, then __arg_ctx argument can only be `void *`, which we |
5917 | * already checked above |
5918 | */ |
	if (!__btf_type_is_struct(ctx_type)) {
		bpf_log(log, "arg#%d should be void pointer\n", arg);
		return -EINVAL;
	}

	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
	if (!__btf_type_is_struct(t) || strcmp(ctx_tname, tname) != 0) {
		bpf_log(log, "arg#%d should be `struct %s *`\n", arg, ctx_tname);
5927 | return -EINVAL; |
5928 | } |
5929 | |
5930 | return 0; |
5931 | } |
5932 | |
5933 | static int btf_translate_to_vmlinux(struct bpf_verifier_log *log, |
5934 | struct btf *btf, |
5935 | const struct btf_type *t, |
5936 | enum bpf_prog_type prog_type, |
5937 | int arg) |
5938 | { |
5939 | if (!btf_is_prog_ctx_type(log, btf, t, prog_type, arg)) |
5940 | return -ENOENT; |
5941 | return find_kern_ctx_type_id(prog_type); |
5942 | } |
5943 | |
5944 | int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type) |
5945 | { |
5946 | const struct btf_member *kctx_member; |
5947 | const struct btf_type *conv_struct; |
5948 | const struct btf_type *kctx_type; |
5949 | u32 kctx_type_id; |
5950 | |
5951 | conv_struct = bpf_ctx_convert.t; |
5952 | /* get member for kernel ctx type */ |
	kctx_member = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2 + 1;
	kctx_type_id = kctx_member->type;
	kctx_type = btf_type_by_id(btf_vmlinux, kctx_type_id);
	if (!btf_type_is_struct(kctx_type)) {
		bpf_log(log, "kern ctx type id %u is not a struct\n", kctx_type_id);
5958 | return -EINVAL; |
5959 | } |
5960 | |
5961 | return kctx_type_id; |
5962 | } |
5963 | |
5964 | BTF_ID_LIST(bpf_ctx_convert_btf_id) |
5965 | BTF_ID(struct, bpf_ctx_convert) |
5966 | |
5967 | struct btf *btf_parse_vmlinux(void) |
5968 | { |
5969 | struct btf_verifier_env *env = NULL; |
5970 | struct bpf_verifier_log *log; |
5971 | struct btf *btf = NULL; |
5972 | int err; |
5973 | |
	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
	if (!env)
		return ERR_PTR(-ENOMEM);
5977 | |
5978 | log = &env->log; |
5979 | log->level = BPF_LOG_KERNEL; |
5980 | |
	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
5982 | if (!btf) { |
5983 | err = -ENOMEM; |
5984 | goto errout; |
5985 | } |
5986 | env->btf = btf; |
5987 | |
5988 | btf->data = __start_BTF; |
5989 | btf->data_size = __stop_BTF - __start_BTF; |
5990 | btf->kernel_btf = true; |
	snprintf(btf->name, sizeof(btf->name), "vmlinux");
5992 | |
5993 | err = btf_parse_hdr(env); |
5994 | if (err) |
5995 | goto errout; |
5996 | |
5997 | btf->nohdr_data = btf->data + btf->hdr.hdr_len; |
5998 | |
5999 | err = btf_parse_str_sec(env); |
6000 | if (err) |
6001 | goto errout; |
6002 | |
6003 | err = btf_check_all_metas(env); |
6004 | if (err) |
6005 | goto errout; |
6006 | |
	err = btf_check_type_tags(env, btf, 1);
6008 | if (err) |
6009 | goto errout; |
6010 | |
6011 | /* btf_parse_vmlinux() runs under bpf_verifier_lock */ |
6012 | bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); |
6013 | |
	refcount_set(&btf->refcnt, 1);
6015 | |
6016 | err = btf_alloc_id(btf); |
6017 | if (err) |
6018 | goto errout; |
6019 | |
6020 | btf_verifier_env_free(env); |
6021 | return btf; |
6022 | |
6023 | errout: |
6024 | btf_verifier_env_free(env); |
6025 | if (btf) { |
		kvfree(btf->types);
		kfree(btf);
	}
	return ERR_PTR(err);
6030 | } |
6031 | |
6032 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
6033 | |
6034 | static struct btf *btf_parse_module(const char *module_name, const void *data, unsigned int data_size) |
6035 | { |
6036 | struct btf_verifier_env *env = NULL; |
6037 | struct bpf_verifier_log *log; |
6038 | struct btf *btf = NULL, *base_btf; |
6039 | int err; |
6040 | |
6041 | base_btf = bpf_get_btf_vmlinux(); |
6042 | if (IS_ERR(base_btf)) |
6043 | return base_btf; |
6044 | if (!base_btf) |
6045 | return ERR_PTR(-EINVAL); |
6046 | |
6047 | env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); |
6048 | if (!env) |
6049 | return ERR_PTR(-ENOMEM); |
6050 | |
6051 | log = &env->log; |
6052 | log->level = BPF_LOG_KERNEL; |
6053 | |
6054 | btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); |
6055 | if (!btf) { |
6056 | err = -ENOMEM; |
6057 | goto errout; |
6058 | } |
6059 | env->btf = btf; |
6060 | |
6061 | btf->base_btf = base_btf; |
6062 | btf->start_id = base_btf->nr_types; |
6063 | btf->start_str_off = base_btf->hdr.str_len; |
6064 | btf->kernel_btf = true; |
6065 | snprintf(btf->name, sizeof(btf->name), "%s" , module_name); |
6066 | |
6067 | btf->data = kvmalloc(data_size, GFP_KERNEL | __GFP_NOWARN); |
6068 | if (!btf->data) { |
6069 | err = -ENOMEM; |
6070 | goto errout; |
6071 | } |
6072 | memcpy(btf->data, data, data_size); |
6073 | btf->data_size = data_size; |
6074 | |
6075 | err = btf_parse_hdr(env); |
6076 | if (err) |
6077 | goto errout; |
6078 | |
6079 | btf->nohdr_data = btf->data + btf->hdr.hdr_len; |
6080 | |
6081 | err = btf_parse_str_sec(env); |
6082 | if (err) |
6083 | goto errout; |
6084 | |
6085 | err = btf_check_all_metas(env); |
6086 | if (err) |
6087 | goto errout; |
6088 | |
6089 | err = btf_check_type_tags(env, btf, btf_nr_types(base_btf)); |
6090 | if (err) |
6091 | goto errout; |
6092 | |
6093 | btf_verifier_env_free(env); |
6094 | refcount_set(&btf->refcnt, 1); |
6095 | return btf; |
6096 | |
6097 | errout: |
6098 | btf_verifier_env_free(env); |
6099 | if (btf) { |
6100 | kvfree(btf->data); |
6101 | kvfree(btf->types); |
6102 | kfree(btf); |
6103 | } |
6104 | return ERR_PTR(err); |
6105 | } |
6106 | |
6107 | #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */ |
6108 | |
6109 | struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog) |
6110 | { |
6111 | struct bpf_prog *tgt_prog = prog->aux->dst_prog; |
6112 | |
6113 | if (tgt_prog) |
6114 | return tgt_prog->aux->btf; |
6115 | else |
6116 | return prog->aux->attach_btf; |
6117 | } |
6118 | |
6119 | static bool is_int_ptr(struct btf *btf, const struct btf_type *t) |
6120 | { |
6121 | /* skip modifiers */ |
	t = btf_type_skip_modifiers(btf, t->type, NULL);
6123 | |
6124 | return btf_type_is_int(t); |
6125 | } |
6126 | |
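/* Map a byte offset into the BTF-typed ctx onto an argument index. Each
 * argument occupies an 8-byte slot (pointers are 8 bytes, scalars are rounded
 * up to 8), so e.g. for (struct sk_buff *skb, u32 len) off 0..7 is arg 0 and
 * off 8..15 is arg 1 (example prototype is illustrative). An offset just past
 * the last argument maps to nr_args (the return value slot), anything beyond
 * that to nr_args + 1. Without a func_proto it falls back to plain off / 8.
 */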
6127 | static u32 get_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto, |
6128 | int off) |
6129 | { |
6130 | const struct btf_param *args; |
6131 | const struct btf_type *t; |
6132 | u32 offset = 0, nr_args; |
6133 | int i; |
6134 | |
6135 | if (!func_proto) |
6136 | return off / 8; |
6137 | |
	nr_args = btf_type_vlen(func_proto);
	args = (const struct btf_param *)(func_proto + 1);
	for (i = 0; i < nr_args; i++) {
		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
6142 | offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8); |
6143 | if (off < offset) |
6144 | return i; |
6145 | } |
6146 | |
	t = btf_type_skip_modifiers(btf, func_proto->type, NULL);
6148 | offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8); |
6149 | if (off < offset) |
6150 | return nr_args; |
6151 | |
6152 | return nr_args + 1; |
6153 | } |
6154 | |
6155 | static bool prog_args_trusted(const struct bpf_prog *prog) |
6156 | { |
6157 | enum bpf_attach_type atype = prog->expected_attach_type; |
6158 | |
6159 | switch (prog->type) { |
6160 | case BPF_PROG_TYPE_TRACING: |
6161 | return atype == BPF_TRACE_RAW_TP || atype == BPF_TRACE_ITER; |
6162 | case BPF_PROG_TYPE_LSM: |
6163 | return bpf_lsm_is_trusted(prog); |
6164 | case BPF_PROG_TYPE_STRUCT_OPS: |
6165 | return true; |
6166 | default: |
6167 | return false; |
6168 | } |
6169 | } |
6170 | |
6171 | int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto, |
6172 | u32 arg_no) |
6173 | { |
6174 | const struct btf_param *args; |
6175 | const struct btf_type *t; |
6176 | int off = 0, i; |
6177 | u32 sz; |
6178 | |
	args = btf_params(func_proto);
6180 | for (i = 0; i < arg_no; i++) { |
6181 | t = btf_type_by_id(btf, args[i].type); |
		t = btf_resolve_size(btf, t, &sz);
		if (IS_ERR(t))
			return PTR_ERR(t);
6185 | off += roundup(sz, 8); |
6186 | } |
6187 | |
6188 | return off; |
6189 | } |
6190 | |
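/* Verifier hook for loads from the ctx of BTF-powered programs (fentry,
 * fexit, fmod_ret, raw_tp, LSM, iter, ...). The offset must be 8-byte
 * aligned; it is mapped to an argument via get_ctx_arg_idx() and the
 * argument's BTF type decides the outcome: scalars and structs passed by
 * value read as scalars, pointers to struct become PTR_TO_BTF_ID (possibly
 * with PTR_TRUSTED, MEM_USER or MEM_PERCPU), and per-prog ctx_arg_info
 * overrides (e.g. PTR_TO_BUF for iterators) take precedence.
 */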
6191 | bool btf_ctx_access(int off, int size, enum bpf_access_type type, |
6192 | const struct bpf_prog *prog, |
6193 | struct bpf_insn_access_aux *info) |
6194 | { |
6195 | const struct btf_type *t = prog->aux->attach_func_proto; |
6196 | struct bpf_prog *tgt_prog = prog->aux->dst_prog; |
6197 | struct btf *btf = bpf_prog_get_target_btf(prog); |
6198 | const char *tname = prog->aux->attach_func_name; |
6199 | struct bpf_verifier_log *log = info->log; |
6200 | const struct btf_param *args; |
6201 | const char *tag_value; |
6202 | u32 nr_args, arg; |
6203 | int i, ret; |
6204 | |
6205 | if (off % 8) { |
6206 | bpf_log(log, fmt: "func '%s' offset %d is not multiple of 8\n" , |
6207 | tname, off); |
6208 | return false; |
6209 | } |
6210 | arg = get_ctx_arg_idx(btf, func_proto: t, off); |
6211 | args = (const struct btf_param *)(t + 1); |
6212 | /* if (t == NULL) Fall back to default BPF prog with |
6213 | * MAX_BPF_FUNC_REG_ARGS u64 arguments. |
6214 | */ |
6215 | nr_args = t ? btf_type_vlen(t) : MAX_BPF_FUNC_REG_ARGS; |
6216 | if (prog->aux->attach_btf_trace) { |
6217 | /* skip first 'void *__data' argument in btf_trace_##name typedef */ |
6218 | args++; |
6219 | nr_args--; |
6220 | } |
6221 | |
6222 | if (arg > nr_args) { |
6223 | bpf_log(log, fmt: "func '%s' doesn't have %d-th argument\n" , |
6224 | tname, arg + 1); |
6225 | return false; |
6226 | } |
6227 | |
6228 | if (arg == nr_args) { |
6229 | switch (prog->expected_attach_type) { |
6230 | case BPF_LSM_CGROUP: |
6231 | case BPF_LSM_MAC: |
6232 | case BPF_TRACE_FEXIT: |
6233 | /* When LSM programs are attached to void LSM hooks |
6234 | * they use FEXIT trampolines and when attached to |
6235 | * int LSM hooks, they use MODIFY_RETURN trampolines. |
6236 | * |
6237 | * While the LSM programs are BPF_MODIFY_RETURN-like |
6238 | * the check: |
6239 | * |
6240 | * if (ret_type != 'int') |
6241 | * return -EINVAL; |
6242 | * |
6243 | * is _not_ done here. This is still safe as LSM hooks |
6244 | * have only void and int return types. |
6245 | */ |
6246 | if (!t) |
6247 | return true; |
6248 | t = btf_type_by_id(btf, t->type); |
6249 | break; |
6250 | case BPF_MODIFY_RETURN: |
6251 | /* For now the BPF_MODIFY_RETURN can only be attached to |
6252 | * functions that return an int. |
6253 | */ |
6254 | if (!t) |
6255 | return false; |
6256 | |
			t = btf_type_skip_modifiers(btf, t->type, NULL);
			if (!btf_type_is_small_int(t)) {
				bpf_log(log,
					"ret type %s not allowed for fmod_ret\n",
					btf_type_str(t));
6262 | return false; |
6263 | } |
6264 | break; |
6265 | default: |
6266 | bpf_log(log, fmt: "func '%s' doesn't have %d-th argument\n" , |
6267 | tname, arg + 1); |
6268 | return false; |
6269 | } |
6270 | } else { |
6271 | if (!t) |
6272 | /* Default prog with MAX_BPF_FUNC_REG_ARGS args */ |
6273 | return true; |
6274 | t = btf_type_by_id(btf, args[arg].type); |
6275 | } |
6276 | |
6277 | /* skip modifiers */ |
6278 | while (btf_type_is_modifier(t)) |
6279 | t = btf_type_by_id(btf, t->type); |
6280 | if (btf_type_is_small_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t)) |
6281 | /* accessing a scalar */ |
6282 | return true; |
6283 | if (!btf_type_is_ptr(t)) { |
6284 | bpf_log(log, |
6285 | fmt: "func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n" , |
6286 | tname, arg, |
6287 | __btf_name_by_offset(btf, offset: t->name_off), |
6288 | btf_type_str(t)); |
6289 | return false; |
6290 | } |
6291 | |
6292 | /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */ |
6293 | for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { |
6294 | const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; |
6295 | u32 type, flag; |
6296 | |
		type = base_type(ctx_arg_info->reg_type);
		flag = type_flag(ctx_arg_info->reg_type);
6299 | if (ctx_arg_info->offset == off && type == PTR_TO_BUF && |
6300 | (flag & PTR_MAYBE_NULL)) { |
6301 | info->reg_type = ctx_arg_info->reg_type; |
6302 | return true; |
6303 | } |
6304 | } |
6305 | |
6306 | if (t->type == 0) |
6307 | /* This is a pointer to void. |
6308 | * It is the same as scalar from the verifier safety pov. |
6309 | * No further pointer walking is allowed. |
6310 | */ |
6311 | return true; |
6312 | |
6313 | if (is_int_ptr(btf, t)) |
6314 | return true; |
6315 | |
6316 | /* this is a pointer to another type */ |
6317 | for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { |
6318 | const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; |
6319 | |
6320 | if (ctx_arg_info->offset == off) { |
6321 | if (!ctx_arg_info->btf_id) { |
				bpf_log(log, "invalid btf_id for context argument offset %u\n", off);
6323 | return false; |
6324 | } |
6325 | |
6326 | info->reg_type = ctx_arg_info->reg_type; |
6327 | info->btf = ctx_arg_info->btf ? : btf_vmlinux; |
6328 | info->btf_id = ctx_arg_info->btf_id; |
6329 | return true; |
6330 | } |
6331 | } |
6332 | |
6333 | info->reg_type = PTR_TO_BTF_ID; |
6334 | if (prog_args_trusted(prog)) |
6335 | info->reg_type |= PTR_TRUSTED; |
6336 | |
6337 | if (tgt_prog) { |
6338 | enum bpf_prog_type tgt_type; |
6339 | |
6340 | if (tgt_prog->type == BPF_PROG_TYPE_EXT) |
6341 | tgt_type = tgt_prog->aux->saved_dst_prog_type; |
6342 | else |
6343 | tgt_type = tgt_prog->type; |
6344 | |
		ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg);
6346 | if (ret > 0) { |
6347 | info->btf = btf_vmlinux; |
6348 | info->btf_id = ret; |
6349 | return true; |
6350 | } else { |
6351 | return false; |
6352 | } |
6353 | } |
6354 | |
6355 | info->btf = btf; |
6356 | info->btf_id = t->type; |
6357 | t = btf_type_by_id(btf, t->type); |
6358 | |
6359 | if (btf_type_is_type_tag(t)) { |
		tag_value = __btf_name_by_offset(btf, t->name_off);
		if (strcmp(tag_value, "user") == 0)
			info->reg_type |= MEM_USER;
		if (strcmp(tag_value, "percpu") == 0)
6364 | info->reg_type |= MEM_PERCPU; |
6365 | } |
6366 | |
6367 | /* skip modifiers */ |
6368 | while (btf_type_is_modifier(t)) { |
6369 | info->btf_id = t->type; |
6370 | t = btf_type_by_id(btf, t->type); |
6371 | } |
6372 | if (!btf_type_is_struct(t)) { |
6373 | bpf_log(log, |
6374 | fmt: "func '%s' arg%d type %s is not a struct\n" , |
6375 | tname, arg, btf_type_str(t)); |
6376 | return false; |
6377 | } |
6378 | bpf_log(log, fmt: "func '%s' arg%d has btf_id %d type %s '%s'\n" , |
6379 | tname, arg, info->btf_id, btf_type_str(t), |
6380 | __btf_name_by_offset(btf, offset: t->name_off)); |
6381 | return true; |
6382 | } |
6383 | EXPORT_SYMBOL_GPL(btf_ctx_access); |
6384 | |
6385 | enum bpf_struct_walk_result { |
6386 | /* < 0 error */ |
6387 | WALK_SCALAR = 0, |
6388 | WALK_PTR, |
6389 | WALK_STRUCT, |
6390 | }; |
6391 | |
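/* Walk one step of a pointer chase inside a struct: given type 't' and a
 * byte range [off, off + size), find the member that covers it and report
 * what lives there - WALK_SCALAR for ints/enums/bitfields, WALK_PTR (with
 * *next_btf_id set) for a pointer to struct, or WALK_STRUCT when the range
 * falls inside a nested struct and the caller has to keep walking. E.g. a
 * read of skb->len resolves to WALK_SCALAR, while a read of skb->dev
 * resolves to WALK_PTR with next_btf_id naming struct net_device.
 */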
6392 | static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf, |
6393 | const struct btf_type *t, int off, int size, |
6394 | u32 *next_btf_id, enum bpf_type_flag *flag, |
6395 | const char **field_name) |
6396 | { |
6397 | u32 i, moff, mtrue_end, msize = 0, total_nelems = 0; |
6398 | const struct btf_type *mtype, *elem_type = NULL; |
6399 | const struct btf_member *member; |
6400 | const char *tname, *mname, *tag_value; |
6401 | u32 vlen, elem_id, mid; |
6402 | |
6403 | again: |
6404 | if (btf_type_is_modifier(t)) |
		t = btf_type_skip_modifiers(btf, t->type, NULL);
	tname = __btf_name_by_offset(btf, t->name_off);
	if (!btf_type_is_struct(t)) {
		bpf_log(log, "Type '%s' is not a struct\n", tname);
6409 | return -EINVAL; |
6410 | } |
6411 | |
6412 | vlen = btf_type_vlen(t); |
6413 | if (BTF_INFO_KIND(t->info) == BTF_KIND_UNION && vlen != 1 && !(*flag & PTR_UNTRUSTED)) |
6414 | /* |
6415 | * walking unions yields untrusted pointers |
6416 | * with exception of __bpf_md_ptr and other |
6417 | * unions with a single member |
6418 | */ |
6419 | *flag |= PTR_UNTRUSTED; |
6420 | |
6421 | if (off + size > t->size) { |
6422 | /* If the last element is a variable size array, we may |
6423 | * need to relax the rule. |
6424 | */ |
6425 | struct btf_array *array_elem; |
6426 | |
6427 | if (vlen == 0) |
6428 | goto error; |
6429 | |
6430 | member = btf_type_member(t) + vlen - 1; |
		mtype = btf_type_skip_modifiers(btf, member->type,
						NULL);
		if (!btf_type_is_array(mtype))
6434 | goto error; |
6435 | |
6436 | array_elem = (struct btf_array *)(mtype + 1); |
6437 | if (array_elem->nelems != 0) |
6438 | goto error; |
6439 | |
		moff = __btf_member_bit_offset(t, member) / 8;
		if (off < moff)
			goto error;

		/* allow structure and integer */
		t = btf_type_skip_modifiers(btf, array_elem->type,
					    NULL);
6447 | |
6448 | if (btf_type_is_int(t)) |
6449 | return WALK_SCALAR; |
6450 | |
6451 | if (!btf_type_is_struct(t)) |
6452 | goto error; |
6453 | |
6454 | off = (off - moff) % t->size; |
6455 | goto again; |
6456 | |
6457 | error: |
6458 | bpf_log(log, fmt: "access beyond struct %s at off %u size %u\n" , |
6459 | tname, off, size); |
6460 | return -EACCES; |
6461 | } |
6462 | |
6463 | for_each_member(i, t, member) { |
6464 | /* offset of the field in bytes */ |
		moff = __btf_member_bit_offset(t, member) / 8;
		if (off + size <= moff)
			/* won't find anything, field is already too far */
			break;

		if (__btf_member_bitfield_size(t, member)) {
			u32 end_bit = __btf_member_bit_offset(t, member) +
				__btf_member_bitfield_size(t, member);
6473 | |
6474 | /* off <= moff instead of off == moff because clang |
6475 | * does not generate a BTF member for anonymous |
6476 | * bitfield like the ":16" here: |
6477 | * struct { |
6478 | * int :16; |
6479 | * int x:8; |
6480 | * }; |
6481 | */ |
6482 | if (off <= moff && |
6483 | BITS_ROUNDUP_BYTES(end_bit) <= off + size) |
6484 | return WALK_SCALAR; |
6485 | |
6486 | /* off may be accessing a following member |
6487 | * |
6488 | * or |
6489 | * |
6490 | * Doing partial access at either end of this |
6491 | * bitfield. Continue on this case also to |
6492 | * treat it as not accessing this bitfield |
6493 | * and eventually error out as field not |
6494 | * found to keep it simple. |
6495 | * It could be relaxed if there was a legit |
6496 | * partial access case later. |
6497 | */ |
6498 | continue; |
6499 | } |
6500 | |
6501 | /* In case of "off" is pointing to holes of a struct */ |
6502 | if (off < moff) |
6503 | break; |
6504 | |
6505 | /* type of the field */ |
6506 | mid = member->type; |
6507 | mtype = btf_type_by_id(btf, member->type); |
		mname = __btf_name_by_offset(btf, member->name_off);

		mtype = __btf_resolve_size(btf, mtype, &msize,
					   &elem_type, &elem_id, &total_nelems,
					   &mid);
		if (IS_ERR(mtype)) {
			bpf_log(log, "field %s doesn't have size\n", mname);
6515 | return -EFAULT; |
6516 | } |
6517 | |
6518 | mtrue_end = moff + msize; |
6519 | if (off >= mtrue_end) |
6520 | /* no overlap with member, keep iterating */ |
6521 | continue; |
6522 | |
		if (btf_type_is_array(mtype)) {
6524 | u32 elem_idx; |
6525 | |
6526 | /* __btf_resolve_size() above helps to |
6527 | * linearize a multi-dimensional array. |
6528 | * |
6529 | * The logic here is treating an array |
6530 | * in a struct as the following way: |
6531 | * |
6532 | * struct outer { |
6533 | * struct inner array[2][2]; |
6534 | * }; |
6535 | * |
6536 | * looks like: |
6537 | * |
6538 | * struct outer { |
6539 | * struct inner array_elem0; |
6540 | * struct inner array_elem1; |
6541 | * struct inner array_elem2; |
6542 | * struct inner array_elem3; |
6543 | * }; |
6544 | * |
6545 | * When accessing outer->array[1][0], it moves |
6546 | * moff to "array_elem2", set mtype to |
6547 | * "struct inner", and msize also becomes |
6548 | * sizeof(struct inner). Then most of the |
6549 | * remaining logic will fall through without |
6550 | * caring the current member is an array or |
6551 | * not. |
6552 | * |
6553 | * Unlike mtype/msize/moff, mtrue_end does not |
6554 | * change. The naming difference ("_true") tells |
6555 | * that it is not always corresponding to |
6556 | * the current mtype/msize/moff. |
6557 | * It is the true end of the current |
6558 | * member (i.e. array in this case). That |
6559 | * will allow an int array to be accessed like |
6560 | * a scratch space, |
6561 | * i.e. allow access beyond the size of |
6562 | * the array's element as long as it is |
6563 | * within the mtrue_end boundary. |
6564 | */ |
6565 | |
6566 | /* skip empty array */ |
6567 | if (moff == mtrue_end) |
6568 | continue; |
6569 | |
6570 | msize /= total_nelems; |
6571 | elem_idx = (off - moff) / msize; |
6572 | moff += elem_idx * msize; |
6573 | mtype = elem_type; |
6574 | mid = elem_id; |
6575 | } |
6576 | |
6577 | /* the 'off' we're looking for is either equal to start |
6578 | * of this field or inside of this struct |
6579 | */ |
		if (btf_type_is_struct(mtype)) {
6581 | /* our field must be inside that union or struct */ |
6582 | t = mtype; |
6583 | |
6584 | /* return if the offset matches the member offset */ |
6585 | if (off == moff) { |
6586 | *next_btf_id = mid; |
6587 | return WALK_STRUCT; |
6588 | } |
6589 | |
6590 | /* adjust offset we're looking for */ |
6591 | off -= moff; |
6592 | goto again; |
6593 | } |
6594 | |
		if (btf_type_is_ptr(mtype)) {
			const struct btf_type *stype, *t;
			enum bpf_type_flag tmp_flag = 0;
			u32 id;

			if (msize != size || off != moff) {
				bpf_log(log,
					"cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
6603 | mname, moff, tname, off, size); |
6604 | return -EACCES; |
6605 | } |
6606 | |
6607 | /* check type tag */ |
6608 | t = btf_type_by_id(btf, mtype->type); |
6609 | if (btf_type_is_type_tag(t)) { |
				tag_value = __btf_name_by_offset(btf, t->name_off);
				/* check __user tag */
				if (strcmp(tag_value, "user") == 0)
					tmp_flag = MEM_USER;
				/* check __percpu tag */
				if (strcmp(tag_value, "percpu") == 0)
					tmp_flag = MEM_PERCPU;
				/* check __rcu tag */
				if (strcmp(tag_value, "rcu") == 0)
6619 | tmp_flag = MEM_RCU; |
6620 | } |
6621 | |
			stype = btf_type_skip_modifiers(btf, mtype->type, &id);
			if (btf_type_is_struct(stype)) {
6624 | *next_btf_id = id; |
6625 | *flag |= tmp_flag; |
6626 | if (field_name) |
6627 | *field_name = mname; |
6628 | return WALK_PTR; |
6629 | } |
6630 | } |
6631 | |
6632 | /* Allow more flexible access within an int as long as |
6633 | * it is within mtrue_end. |
6634 | * Since mtrue_end could be the end of an array, |
6635 | * that also allows using an array of int as a scratch |
6636 | * space. e.g. skb->cb[]. |
6637 | */ |
6638 | if (off + size > mtrue_end && !(*flag & PTR_UNTRUSTED)) { |
6639 | bpf_log(log, |
6640 | fmt: "access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n" , |
6641 | mname, mtrue_end, tname, off, size); |
6642 | return -EACCES; |
6643 | } |
6644 | |
6645 | return WALK_SCALAR; |
6646 | } |
6647 | bpf_log(log, fmt: "struct %s doesn't have field at offset %d\n" , tname, off); |
6648 | return -EINVAL; |
6649 | } |
6650 | |
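/* Repeatedly apply btf_struct_walk() until the access resolves to a scalar
 * or a pointer, diving into nested structs with the offset rebased to 0 at
 * each WALK_STRUCT step. For locally allocated objects (bpf_obj_new) the
 * special fields recorded in btf_struct_meta (spin locks, list/rbtree nodes,
 * ...) may not be touched directly, and a pointer loaded out of such an
 * object degrades to SCALAR_VALUE instead of PTR_TO_BTF_ID.
 */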
6651 | int btf_struct_access(struct bpf_verifier_log *log, |
6652 | const struct bpf_reg_state *reg, |
6653 | int off, int size, enum bpf_access_type atype __maybe_unused, |
6654 | u32 *next_btf_id, enum bpf_type_flag *flag, |
6655 | const char **field_name) |
6656 | { |
6657 | const struct btf *btf = reg->btf; |
6658 | enum bpf_type_flag tmp_flag = 0; |
6659 | const struct btf_type *t; |
6660 | u32 id = reg->btf_id; |
6661 | int err; |
6662 | |
	while (type_is_alloc(reg->type)) {
		struct btf_struct_meta *meta;
		struct btf_record *rec;
		int i;

		meta = btf_find_struct_meta(btf, id);
6669 | if (!meta) |
6670 | break; |
6671 | rec = meta->record; |
6672 | for (i = 0; i < rec->cnt; i++) { |
6673 | struct btf_field *field = &rec->fields[i]; |
6674 | u32 offset = field->offset; |
			if (off < offset + btf_field_type_size(field->type) && offset < off + size) {
				bpf_log(log,
					"direct access to %s is disallowed\n",
					btf_field_type_name(field->type));
6679 | return -EACCES; |
6680 | } |
6681 | } |
6682 | break; |
6683 | } |
6684 | |
6685 | t = btf_type_by_id(btf, id); |
6686 | do { |
		err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag, field_name);
6688 | |
6689 | switch (err) { |
6690 | case WALK_PTR: |
6691 | /* For local types, the destination register cannot |
6692 | * become a pointer again. |
6693 | */ |
			if (type_is_alloc(reg->type))
6695 | return SCALAR_VALUE; |
6696 | /* If we found the pointer or scalar on t+off, |
6697 | * we're done. |
6698 | */ |
6699 | *next_btf_id = id; |
6700 | *flag = tmp_flag; |
6701 | return PTR_TO_BTF_ID; |
6702 | case WALK_SCALAR: |
6703 | return SCALAR_VALUE; |
6704 | case WALK_STRUCT: |
6705 | /* We found nested struct, so continue the search |
6706 | * by diving in it. At this point the offset is |
6707 | * aligned with the new type, so set it to 0. |
6708 | */ |
6709 | t = btf_type_by_id(btf, id); |
6710 | off = 0; |
6711 | break; |
6712 | default: |
6713 | /* It's either an error or an unknown return value;
6714 | * scream and leave. |
6715 | */ |
6716 | if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value"))
6717 | return -EINVAL; |
6718 | return err; |
6719 | } |
6720 | } while (t); |
6721 | |
6722 | return -EINVAL; |
6723 | } |
6724 | |
6725 | /* Check that two BTF types, each specified as a BTF object + id, are exactly
6726 | * the same. Trivial ID check is not enough due to module BTFs, because we can |
6727 | * end up with two different module BTFs, but IDs point to the common type in |
6728 | * vmlinux BTF. |
6729 | */ |
6730 | bool btf_types_are_same(const struct btf *btf1, u32 id1, |
6731 | const struct btf *btf2, u32 id2) |
6732 | { |
6733 | if (id1 != id2) |
6734 | return false; |
6735 | if (btf1 == btf2) |
6736 | return true; |
6737 | return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2); |
6738 | } |
6739 | |
6740 | bool btf_struct_ids_match(struct bpf_verifier_log *log, |
6741 | const struct btf *btf, u32 id, int off, |
6742 | const struct btf *need_btf, u32 need_type_id, |
6743 | bool strict) |
6744 | { |
6745 | const struct btf_type *type; |
6746 | enum bpf_type_flag flag = 0; |
6747 | int err; |
6748 | |
6749 | /* Are we already done? */ |
6750 | if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id))
6751 | return true; |
6752 | /* In case of strict type match, we do not walk struct, the top level |
6753 | * type match must succeed. When strict is true, off should have already |
6754 | * been 0. |
6755 | */ |
6756 | if (strict) |
6757 | return false; |
6758 | again: |
6759 | type = btf_type_by_id(btf, id); |
6760 | if (!type) |
6761 | return false; |
6762 | err = btf_struct_walk(log, btf, type, off, 1, &id, &flag, NULL);
6763 | if (err != WALK_STRUCT) |
6764 | return false; |
6765 | |
6766 | /* We found nested struct object. If it matches |
6767 | * the requested ID, we're done. Otherwise let's |
6768 | * continue the search with offset 0 in the new |
6769 | * type. |
6770 | */ |
6771 | if (!btf_types_are_same(btf, id, need_btf, need_type_id)) {
6772 | off = 0; |
6773 | goto again; |
6774 | } |
6775 | |
6776 | return true; |
6777 | } |
6778 | |
6779 | static int __get_type_size(struct btf *btf, u32 btf_id, |
6780 | const struct btf_type **ret_type) |
6781 | { |
6782 | const struct btf_type *t; |
6783 | |
6784 | *ret_type = btf_type_by_id(btf, 0); |
6785 | if (!btf_id) |
6786 | /* void */ |
6787 | return 0; |
6788 | t = btf_type_by_id(btf, btf_id); |
6789 | while (t && btf_type_is_modifier(t)) |
6790 | t = btf_type_by_id(btf, t->type); |
6791 | if (!t) |
6792 | return -EINVAL; |
6793 | *ret_type = t; |
6794 | if (btf_type_is_ptr(t)) |
6795 | /* kernel size of pointer. Not BPF's size of pointer */
6796 | return sizeof(void *); |
6797 | if (btf_type_is_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t)) |
6798 | return t->size; |
6799 | return -EINVAL; |
6800 | } |
6801 | |
6802 | static u8 __get_type_fmodel_flags(const struct btf_type *t) |
6803 | { |
6804 | u8 flags = 0; |
6805 | |
6806 | if (__btf_type_is_struct(t)) |
6807 | flags |= BTF_FMODEL_STRUCT_ARG; |
6808 | if (btf_type_is_signed_int(t)) |
6809 | flags |= BTF_FMODEL_SIGNED_ARG; |
6810 | |
6811 | return flags; |
6812 | } |
6813 | |
6814 | int btf_distill_func_proto(struct bpf_verifier_log *log, |
6815 | struct btf *btf, |
6816 | const struct btf_type *func, |
6817 | const char *tname, |
6818 | struct btf_func_model *m) |
6819 | { |
6820 | const struct btf_param *args; |
6821 | const struct btf_type *t; |
6822 | u32 i, nargs; |
6823 | int ret; |
6824 | |
6825 | if (!func) { |
6826 | /* BTF function prototype doesn't match the verifier types. |
6827 | * Fall back to MAX_BPF_FUNC_REG_ARGS u64 args. |
6828 | */ |
6829 | for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) { |
6830 | m->arg_size[i] = 8; |
6831 | m->arg_flags[i] = 0; |
6832 | } |
6833 | m->ret_size = 8; |
6834 | m->ret_flags = 0; |
6835 | m->nr_args = MAX_BPF_FUNC_REG_ARGS; |
6836 | return 0; |
6837 | } |
6838 | args = (const struct btf_param *)(func + 1); |
6839 | nargs = btf_type_vlen(func);
6840 | if (nargs > MAX_BPF_FUNC_ARGS) { |
6841 | bpf_log(log, |
6842 | "The function %s has %d arguments. Too many.\n",
6843 | tname, nargs); |
6844 | return -EINVAL; |
6845 | } |
6846 | ret = __get_type_size(btf, func->type, &t);
6847 | if (ret < 0 || __btf_type_is_struct(t)) { |
6848 | bpf_log(log, |
6849 | "The function %s return type %s is unsupported.\n",
6850 | tname, btf_type_str(t)); |
6851 | return -EINVAL; |
6852 | } |
6853 | m->ret_size = ret; |
6854 | m->ret_flags = __get_type_fmodel_flags(t); |
6855 | |
6856 | for (i = 0; i < nargs; i++) { |
6857 | if (i == nargs - 1 && args[i].type == 0) { |
6858 | bpf_log(log, |
6859 | "The function %s with variable args is unsupported.\n",
6860 | tname); |
6861 | return -EINVAL; |
6862 | } |
6863 | ret = __get_type_size(btf, args[i].type, &t);
6864 | |
6865 | /* No support of struct argument size greater than 16 bytes */ |
6866 | if (ret < 0 || ret > 16) { |
6867 | bpf_log(log, |
6868 | "The function %s arg%d type %s is unsupported.\n",
6869 | tname, i, btf_type_str(t)); |
6870 | return -EINVAL; |
6871 | } |
6872 | if (ret == 0) { |
6873 | bpf_log(log, |
6874 | "The function %s has malformed void argument.\n",
6875 | tname); |
6876 | return -EINVAL; |
6877 | } |
6878 | m->arg_size[i] = ret; |
6879 | m->arg_flags[i] = __get_type_fmodel_flags(t); |
6880 | } |
6881 | m->nr_args = nargs; |
6882 | return 0; |
6883 | } |
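
/* A sketch of what the distilled model looks like for a hypothetical
 * traced kernel function
 *
 *	int foo(struct sk_buff *skb, u32 len);
 *
 * assuming a 64-bit target: m->nr_args = 2, m->arg_size[0] = 8 (pointer),
 * m->arg_size[1] = 4 (u32), m->ret_size = 4, with m->arg_flags/ret_flags
 * carrying the BTF_FMODEL_* bits (e.g. BTF_FMODEL_SIGNED_ARG for signed
 * ints, BTF_FMODEL_STRUCT_ARG for small structs passed by value).
 */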
6884 | |
6885 | /* Compare BTFs of two functions assuming only scalars and pointers to context. |
6886 | * t1 points to BTF_KIND_FUNC in btf1 |
6887 | * t2 points to BTF_KIND_FUNC in btf2 |
6888 | * Returns: |
6889 | * EINVAL - function prototype mismatch |
6890 | * EFAULT - verifier bug |
6891 | * 0 - 99% match. The last 1% is validated by the verifier. |
6892 | */ |
6893 | static int btf_check_func_type_match(struct bpf_verifier_log *log, |
6894 | struct btf *btf1, const struct btf_type *t1, |
6895 | struct btf *btf2, const struct btf_type *t2) |
6896 | { |
6897 | const struct btf_param *args1, *args2; |
6898 | const char *fn1, *fn2, *s1, *s2; |
6899 | u32 nargs1, nargs2, i; |
6900 | |
6901 | fn1 = btf_name_by_offset(btf1, t1->name_off);
6902 | fn2 = btf_name_by_offset(btf2, t2->name_off);
6903 | |
6904 | if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) {
6905 | bpf_log(log, "%s() is not a global function\n", fn1);
6906 | return -EINVAL; |
6907 | } |
6908 | if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) {
6909 | bpf_log(log, "%s() is not a global function\n", fn2);
6910 | return -EINVAL; |
6911 | } |
6912 | |
6913 | t1 = btf_type_by_id(btf1, t1->type); |
6914 | if (!t1 || !btf_type_is_func_proto(t1))
6915 | return -EFAULT; |
6916 | t2 = btf_type_by_id(btf2, t2->type); |
6917 | if (!t2 || !btf_type_is_func_proto(t2))
6918 | return -EFAULT; |
6919 | |
6920 | args1 = (const struct btf_param *)(t1 + 1); |
6921 | nargs1 = btf_type_vlen(t1);
6922 | args2 = (const struct btf_param *)(t2 + 1); |
6923 | nargs2 = btf_type_vlen(t2);
6924 | |
6925 | if (nargs1 != nargs2) { |
6926 | bpf_log(log, "%s() has %d args while %s() has %d args\n",
6927 | fn1, nargs1, fn2, nargs2); |
6928 | return -EINVAL; |
6929 | } |
6930 | |
6931 | t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
6932 | t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
6933 | if (t1->info != t2->info) { |
6934 | bpf_log(log, |
6935 | "Return type %s of %s() doesn't match type %s of %s()\n",
6936 | btf_type_str(t1), fn1,
6937 | btf_type_str(t2), fn2);
6938 | return -EINVAL; |
6939 | } |
6940 | |
6941 | for (i = 0; i < nargs1; i++) { |
6942 | t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL);
6943 | t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL);
6944 | |
6945 | if (t1->info != t2->info) { |
6946 | bpf_log(log, "arg%d in %s() is %s while %s() has %s\n",
6947 | i, fn1, btf_type_str(t1),
6948 | fn2, btf_type_str(t2));
6949 | return -EINVAL; |
6950 | } |
6951 | if (btf_type_has_size(t1) && t1->size != t2->size) {
6952 | bpf_log(log, |
6953 | "arg%d in %s() has size %d while %s() has %d\n",
6954 | i, fn1, t1->size, |
6955 | fn2, t2->size); |
6956 | return -EINVAL; |
6957 | } |
6958 | |
6959 | /* global functions are validated with scalars and pointers |
6960 | * to context only. And only global functions can be replaced. |
6961 | * Hence type check only those types. |
6962 | */ |
6963 | if (btf_type_is_int(t1) || btf_is_any_enum(t1))
6964 | continue; |
6965 | if (!btf_type_is_ptr(t1)) {
6966 | bpf_log(log, |
6967 | "arg%d in %s() has unrecognized type\n",
6968 | i, fn1); |
6969 | return -EINVAL; |
6970 | } |
6971 | t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
6972 | t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
6973 | if (!btf_type_is_struct(t1)) {
6974 | bpf_log(log, |
6975 | "arg%d in %s() is not a pointer to context\n",
6976 | i, fn1); |
6977 | return -EINVAL; |
6978 | } |
6979 | if (!btf_type_is_struct(t2)) {
6980 | bpf_log(log, |
6981 | "arg%d in %s() is not a pointer to context\n",
6982 | i, fn2); |
6983 | return -EINVAL; |
6984 | } |
6985 | /* This is an optional check to make program writing easier. |
6986 | * Compare names of structs and report an error to the user. |
6987 | * btf_prepare_func_args() already checked that t2 struct |
6988 | * is a context type. btf_prepare_func_args() will check |
6989 | * later that t1 struct is a context type as well. |
6990 | */ |
6991 | s1 = btf_name_by_offset(btf1, t1->name_off);
6992 | s2 = btf_name_by_offset(btf2, t2->name_off);
6993 | if (strcmp(s1, s2)) { |
6994 | bpf_log(log, |
6995 | "arg%d %s(struct %s *) doesn't match %s(struct %s *)\n",
6996 | i, fn1, s1, fn2, s2); |
6997 | return -EINVAL; |
6998 | } |
6999 | } |
7000 | return 0; |
7001 | } |
7002 | |
7003 | /* Compare BTFs of given program with BTF of target program */ |
7004 | int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, |
7005 | struct btf *btf2, const struct btf_type *t2) |
7006 | { |
7007 | struct btf *btf1 = prog->aux->btf; |
7008 | const struct btf_type *t1; |
7009 | u32 btf_id = 0; |
7010 | |
7011 | if (!prog->aux->func_info) { |
7012 | bpf_log(log, "Program extension requires BTF\n");
7013 | return -EINVAL; |
7014 | } |
7015 | |
7016 | btf_id = prog->aux->func_info[0].type_id; |
7017 | if (!btf_id) |
7018 | return -EFAULT; |
7019 | |
7020 | t1 = btf_type_by_id(btf1, btf_id); |
7021 | if (!t1 || !btf_type_is_func(t1))
7022 | return -EFAULT; |
7023 | |
7024 | return btf_check_func_type_match(log, btf1, t1, btf2, t2); |
7025 | } |
7026 | |
7027 | static bool btf_is_dynptr_ptr(const struct btf *btf, const struct btf_type *t) |
7028 | { |
7029 | const char *name; |
7030 | |
7031 | t = btf_type_by_id(btf, t->type); /* skip PTR */ |
7032 | |
7033 | while (btf_type_is_modifier(t)) |
7034 | t = btf_type_by_id(btf, t->type); |
7035 | |
7036 | /* allow either struct or struct forward declaration */ |
7037 | if (btf_type_is_struct(t) || |
7038 | (btf_type_is_fwd(t) && btf_type_kflag(t) == 0)) { |
7039 | name = btf_str_by_offset(btf, t->name_off);
7040 | return name && strcmp(name, "bpf_dynptr") == 0;
7041 | } |
7042 | |
7043 | return false; |
7044 | } |
7045 | |
7046 | struct bpf_cand_cache { |
7047 | const char *name; |
7048 | u32 name_len; |
7049 | u16 kind; |
7050 | u16 cnt; |
7051 | struct { |
7052 | const struct btf *btf; |
7053 | u32 id; |
7054 | } cands[]; |
7055 | }; |
7056 | |
7057 | static DEFINE_MUTEX(cand_cache_mutex); |
7058 | |
7059 | static struct bpf_cand_cache * |
7060 | bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id); |
7061 | |
7062 | static int btf_get_ptr_to_btf_id(struct bpf_verifier_log *log, int arg_idx, |
7063 | const struct btf *btf, const struct btf_type *t) |
7064 | { |
7065 | struct bpf_cand_cache *cc; |
7066 | struct bpf_core_ctx ctx = { |
7067 | .btf = btf, |
7068 | .log = log, |
7069 | }; |
7070 | u32 kern_type_id, type_id; |
7071 | int err = 0; |
7072 | |
7073 | /* skip PTR and modifiers */ |
7074 | type_id = t->type; |
7075 | t = btf_type_by_id(btf, t->type); |
7076 | while (btf_type_is_modifier(t)) { |
7077 | type_id = t->type; |
7078 | t = btf_type_by_id(btf, t->type); |
7079 | } |
7080 | |
7081 | mutex_lock(&cand_cache_mutex); |
7082 | cc = bpf_core_find_cands(&ctx, type_id);
7083 | if (IS_ERR(cc)) {
7084 | err = PTR_ERR(cc);
7085 | bpf_log(log, "arg#%d reference type('%s %s') candidate matching error: %d\n",
7086 | arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
7087 | err); |
7088 | goto cand_cache_unlock; |
7089 | } |
7090 | if (cc->cnt != 1) { |
7091 | bpf_log(log, "arg#%d reference type('%s %s') %s\n",
7092 | arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
7093 | cc->cnt == 0 ? "has no matches" : "is ambiguous");
7094 | err = cc->cnt == 0 ? -ENOENT : -ESRCH; |
7095 | goto cand_cache_unlock; |
7096 | } |
7097 | if (btf_is_module(cc->cands[0].btf)) {
7098 | bpf_log(log, "arg#%d reference type('%s %s') points to kernel module type (unsupported)\n",
7099 | arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off));
7100 | err = -EOPNOTSUPP; |
7101 | goto cand_cache_unlock; |
7102 | } |
7103 | kern_type_id = cc->cands[0].id; |
7104 | |
7105 | cand_cache_unlock: |
7106 | mutex_unlock(&cand_cache_mutex);
7107 | if (err) |
7108 | return err; |
7109 | |
7110 | return kern_type_id; |
7111 | } |
7112 | |
7113 | enum btf_arg_tag { |
7114 | ARG_TAG_CTX = BIT_ULL(0), |
7115 | ARG_TAG_NONNULL = BIT_ULL(1), |
7116 | ARG_TAG_TRUSTED = BIT_ULL(2), |
7117 | ARG_TAG_NULLABLE = BIT_ULL(3), |
7118 | ARG_TAG_ARENA = BIT_ULL(4), |
7119 | }; |
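
/* These tag bits come from "arg:<tag>" BTF decl_tags attached to global
 * subprog arguments. A minimal sketch of the BPF-program side, assuming
 * libbpf's bpf_helpers.h convenience macros (__arg_ctx and friends expand
 * to __attribute__((btf_decl_tag("arg:<tag>")))):
 *
 *	__noinline int handle_pkt(struct xdp_md *xdp __arg_ctx)
 *	{
 *		return xdp ? 1 : 0;
 *	}
 *
 * btf_prepare_func_args() below translates such decl_tags into the
 * ARG_TAG_* bits above.
 */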
7120 | |
7121 | /* Process BTF of a function to produce high-level expectation of function |
7122 | * arguments (like ARG_PTR_TO_CTX, or ARG_PTR_TO_MEM, etc). This information |
7123 | * is cached in subprog info for reuse. |
7124 | * Returns: |
7125 | * EFAULT - there is a verifier bug. Abort verification. |
7126 | * EINVAL - cannot convert BTF. |
7127 | * 0 - Successfully processed BTF and constructed argument expectations. |
7128 | */ |
7129 | int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog) |
7130 | { |
7131 | bool is_global = subprog_aux(env, subprog)->linkage == BTF_FUNC_GLOBAL; |
7132 | struct bpf_subprog_info *sub = subprog_info(env, subprog); |
7133 | struct bpf_verifier_log *log = &env->log; |
7134 | struct bpf_prog *prog = env->prog; |
7135 | enum bpf_prog_type prog_type = prog->type; |
7136 | struct btf *btf = prog->aux->btf; |
7137 | const struct btf_param *args; |
7138 | const struct btf_type *t, *ref_t, *fn_t; |
7139 | u32 i, nargs, btf_id; |
7140 | const char *tname; |
7141 | |
7142 | if (sub->args_cached) |
7143 | return 0; |
7144 | |
7145 | if (!prog->aux->func_info) { |
7146 | bpf_log(log, "Verifier bug\n");
7147 | return -EFAULT; |
7148 | } |
7149 | |
7150 | btf_id = prog->aux->func_info[subprog].type_id; |
7151 | if (!btf_id) { |
7152 | if (!is_global) /* not fatal for static funcs */ |
7153 | return -EINVAL; |
7154 | bpf_log(log, "Global functions need valid BTF\n");
7155 | return -EFAULT; |
7156 | } |
7157 | |
7158 | fn_t = btf_type_by_id(btf, btf_id); |
7159 | if (!fn_t || !btf_type_is_func(fn_t)) {
7160 | /* These checks were already done by the verifier while loading |
7161 | * struct bpf_func_info |
7162 | */ |
7163 | bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
7164 | subprog); |
7165 | return -EFAULT; |
7166 | } |
7167 | tname = btf_name_by_offset(btf, fn_t->name_off);
7168 | |
7169 | if (prog->aux->func_info_aux[subprog].unreliable) { |
7170 | bpf_log(log, "Verifier bug in function %s()\n", tname);
7171 | return -EFAULT; |
7172 | } |
7173 | if (prog_type == BPF_PROG_TYPE_EXT) |
7174 | prog_type = prog->aux->dst_prog->type; |
7175 | |
7176 | t = btf_type_by_id(btf, fn_t->type); |
7177 | if (!t || !btf_type_is_func_proto(t)) { |
7178 | bpf_log(log, "Invalid type of function %s()\n", tname);
7179 | return -EFAULT; |
7180 | } |
7181 | args = (const struct btf_param *)(t + 1); |
7182 | nargs = btf_type_vlen(t); |
7183 | if (nargs > MAX_BPF_FUNC_REG_ARGS) { |
7184 | if (!is_global) |
7185 | return -EINVAL; |
7186 | bpf_log(log, "Global function %s() with %d > %d args. Buggy compiler.\n",
7187 | tname, nargs, MAX_BPF_FUNC_REG_ARGS); |
7188 | return -EINVAL; |
7189 | } |
7190 | /* check that function returns int, exception cb also requires this */ |
7191 | t = btf_type_by_id(btf, t->type); |
7192 | while (btf_type_is_modifier(t)) |
7193 | t = btf_type_by_id(btf, t->type); |
7194 | if (!btf_type_is_int(t) && !btf_is_any_enum(t)) { |
7195 | if (!is_global) |
7196 | return -EINVAL; |
7197 | bpf_log(log, |
7198 | "Global function %s() doesn't return scalar. Only those are supported.\n",
7199 | tname); |
7200 | return -EINVAL; |
7201 | } |
7202 | /* Convert BTF function arguments into verifier types. |
7203 | * Only PTR_TO_CTX and SCALAR are supported atm. |
7204 | */ |
7205 | for (i = 0; i < nargs; i++) { |
7206 | u32 tags = 0; |
7207 | int id = 0; |
7208 | |
7209 | /* 'arg:<tag>' decl_tag takes precedence over derivation of |
7210 | * register type from BTF type itself |
7211 | */ |
7212 | while ((id = btf_find_next_decl_tag(btf, fn_t, i, "arg:", id)) > 0) {
7213 | const struct btf_type *tag_t = btf_type_by_id(btf, id); |
7214 | const char *tag = __btf_name_by_offset(btf, tag_t->name_off) + 4;
7215 | |
7216 | /* disallow arg tags in static subprogs */ |
7217 | if (!is_global) { |
7218 | bpf_log(log, "arg#%d type tag is not supported in static functions\n", i);
7219 | return -EOPNOTSUPP; |
7220 | } |
7221 | |
7222 | if (strcmp(tag, "ctx") == 0) {
7223 | tags |= ARG_TAG_CTX; |
7224 | } else if (strcmp(tag, "trusted") == 0) {
7225 | tags |= ARG_TAG_TRUSTED; |
7226 | } else if (strcmp(tag, "nonnull") == 0) {
7227 | tags |= ARG_TAG_NONNULL; |
7228 | } else if (strcmp(tag, "nullable") == 0) {
7229 | tags |= ARG_TAG_NULLABLE; |
7230 | } else if (strcmp(tag, "arena") == 0) {
7231 | tags |= ARG_TAG_ARENA; |
7232 | } else { |
7233 | bpf_log(log, "arg#%d has unsupported set of tags\n", i);
7234 | return -EOPNOTSUPP; |
7235 | } |
7236 | } |
7237 | if (id != -ENOENT) { |
7238 | bpf_log(log, "arg#%d type tag fetching failure: %d\n", i, id);
7239 | return id; |
7240 | } |
7241 | |
7242 | t = btf_type_by_id(btf, args[i].type); |
7243 | while (btf_type_is_modifier(t)) |
7244 | t = btf_type_by_id(btf, t->type); |
7245 | if (!btf_type_is_ptr(t)) |
7246 | goto skip_pointer; |
7247 | |
7248 | if ((tags & ARG_TAG_CTX) || btf_is_prog_ctx_type(log, btf, t, prog_type, i)) {
7249 | if (tags & ~ARG_TAG_CTX) { |
7250 | bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7251 | return -EINVAL; |
7252 | } |
7253 | if ((tags & ARG_TAG_CTX) && |
7254 | btf_validate_prog_ctx_type(log, btf, t, i, prog_type,
7255 | prog->expected_attach_type))
7256 | return -EINVAL; |
7257 | sub->args[i].arg_type = ARG_PTR_TO_CTX; |
7258 | continue; |
7259 | } |
7260 | if (btf_is_dynptr_ptr(btf, t)) { |
7261 | if (tags) { |
7262 | bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7263 | return -EINVAL; |
7264 | } |
7265 | sub->args[i].arg_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY; |
7266 | continue; |
7267 | } |
7268 | if (tags & ARG_TAG_TRUSTED) { |
7269 | int kern_type_id; |
7270 | |
7271 | if (tags & ARG_TAG_NONNULL) { |
7272 | bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7273 | return -EINVAL; |
7274 | } |
7275 | |
7276 | kern_type_id = btf_get_ptr_to_btf_id(log, i, btf, t);
7277 | if (kern_type_id < 0) |
7278 | return kern_type_id; |
7279 | |
7280 | sub->args[i].arg_type = ARG_PTR_TO_BTF_ID | PTR_TRUSTED; |
7281 | if (tags & ARG_TAG_NULLABLE) |
7282 | sub->args[i].arg_type |= PTR_MAYBE_NULL; |
7283 | sub->args[i].btf_id = kern_type_id; |
7284 | continue; |
7285 | } |
7286 | if (tags & ARG_TAG_ARENA) { |
7287 | if (tags & ~ARG_TAG_ARENA) { |
7288 | bpf_log(log, "arg#%d arena cannot be combined with any other tags\n", i);
7289 | return -EINVAL; |
7290 | } |
7291 | sub->args[i].arg_type = ARG_PTR_TO_ARENA; |
7292 | continue; |
7293 | } |
7294 | if (is_global) { /* generic user data pointer */ |
7295 | u32 mem_size; |
7296 | |
7297 | if (tags & ARG_TAG_NULLABLE) { |
7298 | bpf_log(log, "arg#%d has invalid combination of tags\n", i);
7299 | return -EINVAL; |
7300 | } |
7301 | |
7302 | t = btf_type_skip_modifiers(btf, t->type, NULL);
7303 | ref_t = btf_resolve_size(btf, t, &mem_size);
7304 | if (IS_ERR(ref_t)) {
7305 | bpf_log(log, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
7306 | i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
7307 | PTR_ERR(ref_t));
7308 | return -EINVAL; |
7309 | } |
7310 | |
7311 | sub->args[i].arg_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL; |
7312 | if (tags & ARG_TAG_NONNULL) |
7313 | sub->args[i].arg_type &= ~PTR_MAYBE_NULL; |
7314 | sub->args[i].mem_size = mem_size; |
7315 | continue; |
7316 | } |
7317 | |
7318 | skip_pointer: |
7319 | if (tags) { |
7320 | bpf_log(log, "arg#%d has pointer tag, but is not a pointer type\n", i);
7321 | return -EINVAL; |
7322 | } |
7323 | if (btf_type_is_int(t) || btf_is_any_enum(t)) { |
7324 | sub->args[i].arg_type = ARG_ANYTHING; |
7325 | continue; |
7326 | } |
7327 | if (!is_global) |
7328 | return -EINVAL; |
7329 | bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
7330 | i, btf_type_str(t), tname); |
7331 | return -EINVAL; |
7332 | } |
7333 | |
7334 | sub->arg_cnt = nargs; |
7335 | sub->args_cached = true; |
7336 | |
7337 | return 0; |
7338 | } |
7339 | |
7340 | static void btf_type_show(const struct btf *btf, u32 type_id, void *obj, |
7341 | struct btf_show *show) |
7342 | { |
7343 | const struct btf_type *t = btf_type_by_id(btf, type_id); |
7344 | |
7345 | show->btf = btf; |
7346 | memset(&show->state, 0, sizeof(show->state)); |
7347 | memset(&show->obj, 0, sizeof(show->obj)); |
7348 | |
7349 | btf_type_ops(t)->show(btf, t, type_id, obj, 0, show); |
7350 | } |
7351 | |
7352 | static void btf_seq_show(struct btf_show *show, const char *fmt, |
7353 | va_list args) |
7354 | { |
7355 | seq_vprintf((struct seq_file *)show->target, fmt, args);
7356 | } |
7357 | |
7358 | int btf_type_seq_show_flags(const struct btf *btf, u32 type_id, |
7359 | void *obj, struct seq_file *m, u64 flags) |
7360 | { |
7361 | struct btf_show sseq; |
7362 | |
7363 | sseq.target = m; |
7364 | sseq.showfn = btf_seq_show; |
7365 | sseq.flags = flags; |
7366 | |
7367 | btf_type_show(btf, type_id, obj, &sseq);
7368 | |
7369 | return sseq.state.status; |
7370 | } |
7371 | |
7372 | void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj, |
7373 | struct seq_file *m) |
7374 | { |
7375 | (void) btf_type_seq_show_flags(btf, type_id, obj, m, |
7376 | BTF_SHOW_NONAME | BTF_SHOW_COMPACT | |
7377 | BTF_SHOW_ZERO | BTF_SHOW_UNSAFE); |
7378 | } |
7379 | |
7380 | struct btf_show_snprintf { |
7381 | struct btf_show show; |
7382 | int len_left; /* space left in string */ |
7383 | int len; /* length we would have written */ |
7384 | }; |
7385 | |
7386 | static void btf_snprintf_show(struct btf_show *show, const char *fmt, |
7387 | va_list args) |
7388 | { |
7389 | struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show; |
7390 | int len; |
7391 | |
7392 | len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);
7393 | |
7394 | if (len < 0) { |
7395 | ssnprintf->len_left = 0; |
7396 | ssnprintf->len = len; |
7397 | } else if (len >= ssnprintf->len_left) { |
7398 | /* no space, drive on to get length we would have written */ |
7399 | ssnprintf->len_left = 0; |
7400 | ssnprintf->len += len; |
7401 | } else { |
7402 | ssnprintf->len_left -= len; |
7403 | ssnprintf->len += len; |
7404 | show->target += len; |
7405 | } |
7406 | } |
7407 | |
7408 | int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj, |
7409 | char *buf, int len, u64 flags) |
7410 | { |
7411 | struct btf_show_snprintf ssnprintf; |
7412 | |
7413 | ssnprintf.show.target = buf; |
7414 | ssnprintf.show.flags = flags; |
7415 | ssnprintf.show.showfn = btf_snprintf_show; |
7416 | ssnprintf.len_left = len; |
7417 | ssnprintf.len = 0; |
7418 | |
7419 | btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf);
7420 | |
7421 | /* If we encountered an error, return it. */ |
7422 | if (ssnprintf.show.state.status) |
7423 | return ssnprintf.show.state.status; |
7424 | |
7425 | /* Otherwise return length we would have written */ |
7426 | return ssnprintf.len; |
7427 | } |
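
/* Minimal usage sketch (type_id and obj are placeholders): like
 * snprintf(), the return value can exceed the buffer size, so callers
 * may size a buffer in two passes:
 *
 *	char buf[64];
 *	int need = btf_type_snprintf_show(btf, type_id, obj, buf,
 *					  sizeof(buf), BTF_SHOW_COMPACT);
 *	if (need >= sizeof(buf))
 *		;	// truncated; 'need' is the length that was required
 *
 * A negative return reports a show-state error instead of a length.
 */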
7428 | |
7429 | #ifdef CONFIG_PROC_FS |
7430 | static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp) |
7431 | { |
7432 | const struct btf *btf = filp->private_data; |
7433 | |
7434 | seq_printf(m, "btf_id:\t%u\n", btf->id);
7435 | } |
7436 | #endif |
7437 | |
7438 | static int btf_release(struct inode *inode, struct file *filp) |
7439 | { |
7440 | btf_put(filp->private_data);
7441 | return 0; |
7442 | } |
7443 | |
7444 | const struct file_operations btf_fops = { |
7445 | #ifdef CONFIG_PROC_FS |
7446 | .show_fdinfo = bpf_btf_show_fdinfo, |
7447 | #endif |
7448 | .release = btf_release, |
7449 | }; |
7450 | |
7451 | static int __btf_new_fd(struct btf *btf) |
7452 | { |
7453 | return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
7454 | } |
7455 | |
7456 | int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) |
7457 | { |
7458 | struct btf *btf; |
7459 | int ret; |
7460 | |
7461 | btf = btf_parse(attr, uattr, uattr_size); |
7462 | if (IS_ERR(btf))
7463 | return PTR_ERR(btf);
7464 | |
7465 | ret = btf_alloc_id(btf); |
7466 | if (ret) { |
7467 | btf_free(btf); |
7468 | return ret; |
7469 | } |
7470 | |
7471 | /* |
7472 | * The BTF ID is published to userspace.
7473 | * All BTF freeing must go through call_rcu() from
7474 | * now on (i.e. free by calling btf_put()).
7475 | */ |
7476 | |
7477 | ret = __btf_new_fd(btf); |
7478 | if (ret < 0) |
7479 | btf_put(btf); |
7480 | |
7481 | return ret; |
7482 | } |
7483 | |
7484 | struct btf *btf_get_by_fd(int fd) |
7485 | { |
7486 | struct btf *btf; |
7487 | struct fd f; |
7488 | |
7489 | f = fdget(fd); |
7490 | |
7491 | if (!f.file) |
7492 | return ERR_PTR(-EBADF);
7493 | |
7494 | if (f.file->f_op != &btf_fops) { |
7495 | fdput(f);
7496 | return ERR_PTR(-EINVAL);
7497 | } |
7498 | |
7499 | btf = f.file->private_data; |
7500 | refcount_inc(&btf->refcnt);
7501 | fdput(f);
7502 | |
7503 | return btf; |
7504 | } |
7505 | |
7506 | int btf_get_info_by_fd(const struct btf *btf, |
7507 | const union bpf_attr *attr, |
7508 | union bpf_attr __user *uattr) |
7509 | { |
7510 | struct bpf_btf_info __user *uinfo; |
7511 | struct bpf_btf_info info; |
7512 | u32 info_copy, btf_copy; |
7513 | void __user *ubtf; |
7514 | char __user *uname; |
7515 | u32 uinfo_len, uname_len, name_len; |
7516 | int ret = 0; |
7517 | |
7518 | uinfo = u64_to_user_ptr(attr->info.info); |
7519 | uinfo_len = attr->info.info_len; |
7520 | |
7521 | info_copy = min_t(u32, uinfo_len, sizeof(info)); |
7522 | memset(&info, 0, sizeof(info)); |
7523 | if (copy_from_user(&info, uinfo, info_copy))
7524 | return -EFAULT; |
7525 | |
7526 | info.id = btf->id; |
7527 | ubtf = u64_to_user_ptr(info.btf); |
7528 | btf_copy = min_t(u32, btf->data_size, info.btf_size); |
7529 | if (copy_to_user(ubtf, btf->data, btf_copy))
7530 | return -EFAULT; |
7531 | info.btf_size = btf->data_size; |
7532 | |
7533 | info.kernel_btf = btf->kernel_btf; |
7534 | |
7535 | uname = u64_to_user_ptr(info.name); |
7536 | uname_len = info.name_len; |
7537 | if (!uname ^ !uname_len) |
7538 | return -EINVAL; |
7539 | |
7540 | name_len = strlen(btf->name); |
7541 | info.name_len = name_len; |
7542 | |
7543 | if (uname) { |
7544 | if (uname_len >= name_len + 1) { |
7545 | if (copy_to_user(uname, btf->name, name_len + 1))
7546 | return -EFAULT; |
7547 | } else { |
7548 | char zero = '\0'; |
7549 | |
7550 | if (copy_to_user(uname, btf->name, uname_len - 1))
7551 | return -EFAULT; |
7552 | if (put_user(zero, uname + uname_len - 1)) |
7553 | return -EFAULT; |
7554 | /* let user-space know about too short buffer */ |
7555 | ret = -ENOSPC; |
7556 | } |
7557 | } |
7558 | |
7559 | if (copy_to_user(uinfo, &info, info_copy) ||
7560 | put_user(info_copy, &uattr->info.info_len)) |
7561 | return -EFAULT; |
7562 | |
7563 | return ret; |
7564 | } |
7565 | |
7566 | int btf_get_fd_by_id(u32 id) |
7567 | { |
7568 | struct btf *btf; |
7569 | int fd; |
7570 | |
7571 | rcu_read_lock(); |
7572 | btf = idr_find(&btf_idr, id); |
7573 | if (!btf || !refcount_inc_not_zero(&btf->refcnt))
7574 | btf = ERR_PTR(-ENOENT);
7575 | rcu_read_unlock(); |
7576 | |
7577 | if (IS_ERR(btf))
7578 | return PTR_ERR(btf);
7579 | |
7580 | fd = __btf_new_fd(btf); |
7581 | if (fd < 0) |
7582 | btf_put(btf); |
7583 | |
7584 | return fd; |
7585 | } |
7586 | |
7587 | u32 btf_obj_id(const struct btf *btf) |
7588 | { |
7589 | return btf->id; |
7590 | } |
7591 | |
7592 | bool btf_is_kernel(const struct btf *btf) |
7593 | { |
7594 | return btf->kernel_btf; |
7595 | } |
7596 | |
7597 | bool btf_is_module(const struct btf *btf) |
7598 | { |
7599 | return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0;
7600 | } |
7601 | |
7602 | enum { |
7603 | BTF_MODULE_F_LIVE = (1 << 0), |
7604 | }; |
7605 | |
7606 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
7607 | struct btf_module { |
7608 | struct list_head list; |
7609 | struct module *module; |
7610 | struct btf *btf; |
7611 | struct bin_attribute *sysfs_attr; |
7612 | int flags; |
7613 | }; |
7614 | |
7615 | static LIST_HEAD(btf_modules); |
7616 | static DEFINE_MUTEX(btf_module_mutex); |
7617 | |
7618 | static ssize_t |
7619 | btf_module_read(struct file *file, struct kobject *kobj, |
7620 | struct bin_attribute *bin_attr, |
7621 | char *buf, loff_t off, size_t len) |
7622 | { |
7623 | const struct btf *btf = bin_attr->private; |
7624 | |
7625 | memcpy(buf, btf->data + off, len); |
7626 | return len; |
7627 | } |
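
/* This read handler backs the per-module bin_attribute registered below
 * under btf_kobj, so each module's raw BTF becomes readable at
 * /sys/kernel/btf/<module_name>, next to /sys/kernel/btf/vmlinux.
 * For example, from userspace:
 *
 *	bpftool btf dump file /sys/kernel/btf/<module_name> format c
 */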
7628 | |
7629 | static void purge_cand_cache(struct btf *btf); |
7630 | |
7631 | static int btf_module_notify(struct notifier_block *nb, unsigned long op, |
7632 | void *module) |
7633 | { |
7634 | struct btf_module *btf_mod, *tmp; |
7635 | struct module *mod = module; |
7636 | struct btf *btf; |
7637 | int err = 0; |
7638 | |
7639 | if (mod->btf_data_size == 0 || |
7640 | (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE && |
7641 | op != MODULE_STATE_GOING)) |
7642 | goto out; |
7643 | |
7644 | switch (op) { |
7645 | case MODULE_STATE_COMING: |
7646 | btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL); |
7647 | if (!btf_mod) { |
7648 | err = -ENOMEM; |
7649 | goto out; |
7650 | } |
7651 | btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size); |
7652 | if (IS_ERR(btf)) { |
7653 | kfree(btf_mod); |
7654 | if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) { |
7655 | pr_warn("failed to validate module [%s] BTF: %ld\n",
7656 | mod->name, PTR_ERR(btf)); |
7657 | err = PTR_ERR(btf); |
7658 | } else { |
7659 | pr_warn_once("Kernel module BTF mismatch detected, BTF debug info may be unavailable for some modules\n");
7660 | } |
7661 | goto out; |
7662 | } |
7663 | err = btf_alloc_id(btf); |
7664 | if (err) { |
7665 | btf_free(btf); |
7666 | kfree(btf_mod); |
7667 | goto out; |
7668 | } |
7669 | |
7670 | purge_cand_cache(NULL); |
7671 | mutex_lock(&btf_module_mutex); |
7672 | btf_mod->module = module; |
7673 | btf_mod->btf = btf; |
7674 | list_add(&btf_mod->list, &btf_modules); |
7675 | mutex_unlock(&btf_module_mutex); |
7676 | |
7677 | if (IS_ENABLED(CONFIG_SYSFS)) { |
7678 | struct bin_attribute *attr; |
7679 | |
7680 | attr = kzalloc(sizeof(*attr), GFP_KERNEL); |
7681 | if (!attr) |
7682 | goto out; |
7683 | |
7684 | sysfs_bin_attr_init(attr); |
7685 | attr->attr.name = btf->name; |
7686 | attr->attr.mode = 0444; |
7687 | attr->size = btf->data_size; |
7688 | attr->private = btf; |
7689 | attr->read = btf_module_read; |
7690 | |
7691 | err = sysfs_create_bin_file(btf_kobj, attr); |
7692 | if (err) { |
7693 | pr_warn("failed to register module [%s] BTF in sysfs: %d\n",
7694 | mod->name, err); |
7695 | kfree(attr); |
7696 | err = 0; |
7697 | goto out; |
7698 | } |
7699 | |
7700 | btf_mod->sysfs_attr = attr; |
7701 | } |
7702 | |
7703 | break; |
7704 | case MODULE_STATE_LIVE: |
7705 | mutex_lock(&btf_module_mutex); |
7706 | list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { |
7707 | if (btf_mod->module != module) |
7708 | continue; |
7709 | |
7710 | btf_mod->flags |= BTF_MODULE_F_LIVE; |
7711 | break; |
7712 | } |
7713 | mutex_unlock(&btf_module_mutex); |
7714 | break; |
7715 | case MODULE_STATE_GOING: |
7716 | mutex_lock(&btf_module_mutex); |
7717 | list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { |
7718 | if (btf_mod->module != module) |
7719 | continue; |
7720 | |
7721 | list_del(&btf_mod->list); |
7722 | if (btf_mod->sysfs_attr) |
7723 | sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr); |
7724 | purge_cand_cache(btf_mod->btf); |
7725 | btf_put(btf_mod->btf); |
7726 | kfree(btf_mod->sysfs_attr); |
7727 | kfree(btf_mod); |
7728 | break; |
7729 | } |
7730 | mutex_unlock(&btf_module_mutex); |
7731 | break; |
7732 | } |
7733 | out: |
7734 | return notifier_from_errno(err); |
7735 | } |
7736 | |
7737 | static struct notifier_block btf_module_nb = { |
7738 | .notifier_call = btf_module_notify, |
7739 | }; |
7740 | |
7741 | static int __init btf_module_init(void) |
7742 | { |
7743 | register_module_notifier(&btf_module_nb); |
7744 | return 0; |
7745 | } |
7746 | |
7747 | fs_initcall(btf_module_init); |
7748 | #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */ |
7749 | |
7750 | struct module *btf_try_get_module(const struct btf *btf) |
7751 | { |
7752 | struct module *res = NULL; |
7753 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
7754 | struct btf_module *btf_mod, *tmp; |
7755 | |
7756 | mutex_lock(&btf_module_mutex); |
7757 | list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { |
7758 | if (btf_mod->btf != btf) |
7759 | continue; |
7760 | |
7761 | /* We must only consider modules whose __init routine has
7762 | * finished, hence we must check for the BTF_MODULE_F_LIVE flag,
7763 | * which is set from the notifier callback for |
7764 | * MODULE_STATE_LIVE. |
7765 | */ |
7766 | if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module)) |
7767 | res = btf_mod->module; |
7768 | |
7769 | break; |
7770 | } |
7771 | mutex_unlock(&btf_module_mutex); |
7772 | #endif |
7773 | |
7774 | return res; |
7775 | } |
7776 | |
7777 | /* Returns struct btf corresponding to the struct module. |
7778 | * This function can return NULL or ERR_PTR. |
7779 | */ |
7780 | static struct btf *btf_get_module_btf(const struct module *module) |
7781 | { |
7782 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
7783 | struct btf_module *btf_mod, *tmp; |
7784 | #endif |
7785 | struct btf *btf = NULL; |
7786 | |
7787 | if (!module) { |
7788 | btf = bpf_get_btf_vmlinux(); |
7789 | if (!IS_ERR_OR_NULL(btf))
7790 | btf_get(btf); |
7791 | return btf; |
7792 | } |
7793 | |
7794 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
7795 | mutex_lock(&btf_module_mutex); |
7796 | list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) { |
7797 | if (btf_mod->module != module) |
7798 | continue; |
7799 | |
7800 | btf_get(btf_mod->btf); |
7801 | btf = btf_mod->btf; |
7802 | break; |
7803 | } |
7804 | mutex_unlock(&btf_module_mutex); |
7805 | #endif |
7806 | |
7807 | return btf; |
7808 | } |
7809 | |
7810 | static int check_btf_kconfigs(const struct module *module, const char *feature) |
7811 | { |
7812 | if (!module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { |
7813 | pr_err("missing vmlinux BTF, cannot register %s\n", feature);
7814 | return -ENOENT; |
7815 | } |
7816 | if (module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) |
7817 | pr_warn("missing module BTF, cannot register %s\n", feature);
7818 | return 0; |
7819 | } |
7820 | |
7821 | BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags) |
7822 | { |
7823 | struct btf *btf = NULL; |
7824 | int btf_obj_fd = 0; |
7825 | long ret; |
7826 | |
7827 | if (flags) |
7828 | return -EINVAL; |
7829 | |
7830 | if (name_sz <= 1 || name[name_sz - 1]) |
7831 | return -EINVAL; |
7832 | |
7833 | ret = bpf_find_btf_id(name, kind, &btf);
7834 | if (ret > 0 && btf_is_module(btf)) { |
7835 | btf_obj_fd = __btf_new_fd(btf); |
7836 | if (btf_obj_fd < 0) { |
7837 | btf_put(btf); |
7838 | return btf_obj_fd; |
7839 | } |
7840 | return ret | (((u64)btf_obj_fd) << 32); |
7841 | } |
7842 | if (ret > 0) |
7843 | btf_put(btf); |
7844 | return ret; |
7845 | } |
7846 | |
7847 | const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = { |
7848 | .func = bpf_btf_find_by_name_kind, |
7849 | .gpl_only = false, |
7850 | .ret_type = RET_INTEGER, |
7851 | .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
7852 | .arg2_type = ARG_CONST_SIZE, |
7853 | .arg3_type = ARG_ANYTHING, |
7854 | .arg4_type = ARG_ANYTHING, |
7855 | }; |
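
/* Sketch of use from BPF program context (names are placeholders): the
 * lower 32 bits of the return value carry the type ID and the upper 32
 * bits a module BTF object FD (0 when the type lives in vmlinux BTF):
 *
 *	char name[] = "task_struct";
 *	long res = bpf_btf_find_by_name_kind(name, sizeof(name),
 *					     BTF_KIND_STRUCT, 0);
 *	__u32 type_id = (__u32)res;
 *	__u32 btf_obj_fd = res >> 32;
 *
 * A negative return value reports an error instead.
 */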
7856 | |
7857 | BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE) |
7858 | #define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type) |
7859 | BTF_TRACING_TYPE_xxx |
7860 | #undef BTF_TRACING_TYPE |
7861 | |
7862 | static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name, |
7863 | const struct btf_type *func, u32 func_flags) |
7864 | { |
7865 | u32 flags = func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY); |
7866 | const char *name, *sfx, *iter_name; |
7867 | const struct btf_param *arg; |
7868 | const struct btf_type *t; |
7869 | char exp_name[128]; |
7870 | u32 nr_args; |
7871 | |
7872 | /* exactly one of KF_ITER_{NEW,NEXT,DESTROY} can be set */ |
7873 | if (!flags || (flags & (flags - 1))) |
7874 | return -EINVAL; |
7875 | |
7876 | /* any BPF iter kfunc should have `struct bpf_iter_<type> *` first arg */ |
7877 | nr_args = btf_type_vlen(func);
7878 | if (nr_args < 1) |
7879 | return -EINVAL; |
7880 | |
7881 | arg = &btf_params(func)[0];
7882 | t = btf_type_skip_modifiers(btf, arg->type, NULL);
7883 | if (!t || !btf_type_is_ptr(t)) |
7884 | return -EINVAL; |
7885 | t = btf_type_skip_modifiers(btf, t->type, NULL);
7886 | if (!t || !__btf_type_is_struct(t)) |
7887 | return -EINVAL; |
7888 | |
7889 | name = btf_name_by_offset(btf, t->name_off);
7890 | if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1)) |
7891 | return -EINVAL; |
7892 | |
7893 | /* sizeof(struct bpf_iter_<type>) should be a multiple of 8 to |
7894 | * fit nicely in stack slots |
7895 | */ |
7896 | if (t->size == 0 || (t->size % 8)) |
7897 | return -EINVAL; |
7898 | |
7899 | /* validate bpf_iter_<type>_{new,next,destroy}(struct bpf_iter_<type> *) |
7900 | * naming pattern |
7901 | */ |
7902 | iter_name = name + sizeof(ITER_PREFIX) - 1; |
7903 | if (flags & KF_ITER_NEW) |
7904 | sfx = "new";
7905 | else if (flags & KF_ITER_NEXT) |
7906 | sfx = "next";
7907 | else /* (flags & KF_ITER_DESTROY) */ |
7908 | sfx = "destroy";
7909 | |
7910 | snprintf(exp_name, sizeof(exp_name), "bpf_iter_%s_%s", iter_name, sfx);
7911 | if (strcmp(func_name, exp_name)) |
7912 | return -EINVAL; |
7913 | |
7914 | /* only iter constructor should have extra arguments */ |
7915 | if (!(flags & KF_ITER_NEW) && nr_args != 1) |
7916 | return -EINVAL; |
7917 | |
7918 | if (flags & KF_ITER_NEXT) { |
7919 | /* bpf_iter_<type>_next() should return pointer */ |
7920 | t = btf_type_skip_modifiers(btf, func->type, NULL);
7921 | if (!t || !btf_type_is_ptr(t)) |
7922 | return -EINVAL; |
7923 | } |
7924 | |
7925 | if (flags & KF_ITER_DESTROY) { |
7926 | /* bpf_iter_<type>_destroy() should return void */ |
7927 | t = btf_type_by_id(btf, func->type); |
7928 | if (!t || !btf_type_is_void(t)) |
7929 | return -EINVAL; |
7930 | } |
7931 | |
7932 | return 0; |
7933 | } |
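
/* The naming and typing convention enforced above, sketched for a
 * hypothetical "foo" iterator (struct size must be a multiple of 8;
 * only the constructor may take extra arguments):
 *
 *	struct bpf_iter_foo { __u64 __opaque[2]; } __attribute__((aligned(8)));
 *
 *	int bpf_iter_foo_new(struct bpf_iter_foo *it, int start);	// KF_ITER_NEW
 *	int *bpf_iter_foo_next(struct bpf_iter_foo *it);		// KF_ITER_NEXT
 *	void bpf_iter_foo_destroy(struct bpf_iter_foo *it);		// KF_ITER_DESTROY
 */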
7934 | |
7935 | static int btf_check_kfunc_protos(struct btf *btf, u32 func_id, u32 func_flags) |
7936 | { |
7937 | const struct btf_type *func; |
7938 | const char *func_name; |
7939 | int err; |
7940 | |
7941 | /* any kfunc should be FUNC -> FUNC_PROTO */ |
7942 | func = btf_type_by_id(btf, func_id); |
7943 | if (!func || !btf_type_is_func(func))
7944 | return -EINVAL; |
7945 | |
7946 | /* sanity check kfunc name */ |
7947 | func_name = btf_name_by_offset(btf, func->name_off);
7948 | if (!func_name || !func_name[0]) |
7949 | return -EINVAL; |
7950 | |
7951 | func = btf_type_by_id(btf, func->type); |
7952 | if (!func || !btf_type_is_func_proto(func))
7953 | return -EINVAL; |
7954 | |
7955 | if (func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY)) { |
7956 | err = btf_check_iter_kfuncs(btf, func_name, func, func_flags); |
7957 | if (err) |
7958 | return err; |
7959 | } |
7960 | |
7961 | return 0; |
7962 | } |
7963 | |
7964 | /* Kernel Function (kfunc) BTF ID set registration API */ |
7965 | |
7966 | static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, |
7967 | const struct btf_kfunc_id_set *kset) |
7968 | { |
7969 | struct btf_kfunc_hook_filter *hook_filter; |
7970 | struct btf_id_set8 *add_set = kset->set; |
7971 | bool vmlinux_set = !btf_is_module(btf); |
7972 | bool add_filter = !!kset->filter; |
7973 | struct btf_kfunc_set_tab *tab; |
7974 | struct btf_id_set8 *set; |
7975 | u32 set_cnt; |
7976 | int ret; |
7977 | |
7978 | if (hook >= BTF_KFUNC_HOOK_MAX) { |
7979 | ret = -EINVAL; |
7980 | goto end; |
7981 | } |
7982 | |
7983 | if (!add_set->cnt) |
7984 | return 0; |
7985 | |
7986 | tab = btf->kfunc_set_tab; |
7987 | |
7988 | if (tab && add_filter) { |
7989 | u32 i; |
7990 | |
7991 | hook_filter = &tab->hook_filters[hook]; |
7992 | for (i = 0; i < hook_filter->nr_filters; i++) { |
7993 | if (hook_filter->filters[i] == kset->filter) { |
7994 | add_filter = false; |
7995 | break; |
7996 | } |
7997 | } |
7998 | |
7999 | if (add_filter && hook_filter->nr_filters == BTF_KFUNC_FILTER_MAX_CNT) { |
8000 | ret = -E2BIG; |
8001 | goto end; |
8002 | } |
8003 | } |
8004 | |
8005 | if (!tab) { |
8006 | tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN);
8007 | if (!tab) |
8008 | return -ENOMEM; |
8009 | btf->kfunc_set_tab = tab; |
8010 | } |
8011 | |
8012 | set = tab->sets[hook]; |
8013 | /* Warn when register_btf_kfunc_id_set is called twice for the same hook |
8014 | * for module sets. |
8015 | */ |
8016 | if (WARN_ON_ONCE(set && !vmlinux_set)) { |
8017 | ret = -EINVAL; |
8018 | goto end; |
8019 | } |
8020 | |
8021 | /* We don't need to allocate, concatenate, and sort module sets, because |
8022 | * only one is allowed per hook. Hence, we can directly assign the |
8023 | * pointer and return. |
8024 | */ |
8025 | if (!vmlinux_set) { |
8026 | tab->sets[hook] = add_set; |
8027 | goto do_add_filter; |
8028 | } |
8029 | |
8030 | /* In case of vmlinux sets, there may be more than one set being |
8031 | * registered per hook. To create a unified set, we allocate a new set |
8032 | * and concatenate all individual sets being registered. While each set |
8033 | * is individually sorted, they may become unsorted when concatenated, |
8034 | * hence re-sorting the final set again is required to make binary |
8035 | * searching the set using btf_id_set8_contains function work. |
8036 | */ |
8037 | set_cnt = set ? set->cnt : 0; |
8038 | |
8039 | if (set_cnt > U32_MAX - add_set->cnt) { |
8040 | ret = -EOVERFLOW; |
8041 | goto end; |
8042 | } |
8043 | |
8044 | if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) { |
8045 | ret = -E2BIG; |
8046 | goto end; |
8047 | } |
8048 | |
8049 | /* Grow set */ |
8050 | set = krealloc(tab->sets[hook],
8051 | offsetof(struct btf_id_set8, pairs[set_cnt + add_set->cnt]), |
8052 | GFP_KERNEL | __GFP_NOWARN); |
8053 | if (!set) { |
8054 | ret = -ENOMEM; |
8055 | goto end; |
8056 | } |
8057 | |
8058 | /* For newly allocated set, initialize set->cnt to 0 */ |
8059 | if (!tab->sets[hook]) |
8060 | set->cnt = 0; |
8061 | tab->sets[hook] = set; |
8062 | |
8063 | /* Concatenate the two sets */ |
8064 | memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0])); |
8065 | set->cnt += add_set->cnt; |
8066 | |
8067 | sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL);
8068 | |
8069 | do_add_filter: |
8070 | if (add_filter) { |
8071 | hook_filter = &tab->hook_filters[hook]; |
8072 | hook_filter->filters[hook_filter->nr_filters++] = kset->filter; |
8073 | } |
8074 | return 0; |
8075 | end: |
8076 | btf_free_kfunc_set_tab(btf); |
8077 | return ret; |
8078 | } |
8079 | |
8080 | static u32 *__btf_kfunc_id_set_contains(const struct btf *btf, |
8081 | enum btf_kfunc_hook hook, |
8082 | u32 kfunc_btf_id, |
8083 | const struct bpf_prog *prog) |
8084 | { |
8085 | struct btf_kfunc_hook_filter *hook_filter; |
8086 | struct btf_id_set8 *set; |
8087 | u32 *id, i; |
8088 | |
8089 | if (hook >= BTF_KFUNC_HOOK_MAX) |
8090 | return NULL; |
8091 | if (!btf->kfunc_set_tab) |
8092 | return NULL; |
8093 | hook_filter = &btf->kfunc_set_tab->hook_filters[hook]; |
8094 | for (i = 0; i < hook_filter->nr_filters; i++) { |
8095 | if (hook_filter->filters[i](prog, kfunc_btf_id)) |
8096 | return NULL; |
8097 | } |
8098 | set = btf->kfunc_set_tab->sets[hook]; |
8099 | if (!set) |
8100 | return NULL; |
8101 | id = btf_id_set8_contains(set, kfunc_btf_id);
8102 | if (!id) |
8103 | return NULL; |
8104 | /* The flags for BTF ID are located next to it */ |
8105 | return id + 1; |
8106 | } |
8107 | |
8108 | static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type) |
8109 | { |
8110 | switch (prog_type) { |
8111 | case BPF_PROG_TYPE_UNSPEC: |
8112 | return BTF_KFUNC_HOOK_COMMON; |
8113 | case BPF_PROG_TYPE_XDP: |
8114 | return BTF_KFUNC_HOOK_XDP; |
8115 | case BPF_PROG_TYPE_SCHED_CLS: |
8116 | return BTF_KFUNC_HOOK_TC; |
8117 | case BPF_PROG_TYPE_STRUCT_OPS: |
8118 | return BTF_KFUNC_HOOK_STRUCT_OPS; |
8119 | case BPF_PROG_TYPE_TRACING: |
8120 | case BPF_PROG_TYPE_LSM: |
8121 | return BTF_KFUNC_HOOK_TRACING; |
8122 | case BPF_PROG_TYPE_SYSCALL: |
8123 | return BTF_KFUNC_HOOK_SYSCALL; |
8124 | case BPF_PROG_TYPE_CGROUP_SKB: |
8125 | case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: |
8126 | return BTF_KFUNC_HOOK_CGROUP_SKB; |
8127 | case BPF_PROG_TYPE_SCHED_ACT: |
8128 | return BTF_KFUNC_HOOK_SCHED_ACT; |
8129 | case BPF_PROG_TYPE_SK_SKB: |
8130 | return BTF_KFUNC_HOOK_SK_SKB; |
8131 | case BPF_PROG_TYPE_SOCKET_FILTER: |
8132 | return BTF_KFUNC_HOOK_SOCKET_FILTER; |
8133 | case BPF_PROG_TYPE_LWT_OUT: |
8134 | case BPF_PROG_TYPE_LWT_IN: |
8135 | case BPF_PROG_TYPE_LWT_XMIT: |
8136 | case BPF_PROG_TYPE_LWT_SEG6LOCAL: |
8137 | return BTF_KFUNC_HOOK_LWT; |
8138 | case BPF_PROG_TYPE_NETFILTER: |
8139 | return BTF_KFUNC_HOOK_NETFILTER; |
8140 | default: |
8141 | return BTF_KFUNC_HOOK_MAX; |
8142 | } |
8143 | } |
8144 | |
8145 | /* Caution: |
8146 | * Reference to the module (obtained using btf_try_get_module) corresponding to |
8147 | * the struct btf *MUST* be held when calling this function from verifier |
8148 | * context. This is usually true as we stash references in prog's kfunc_btf_tab; |
8149 | * keeping the reference for the duration of the call provides the necessary |
8150 | * protection for looking up a well-formed btf->kfunc_set_tab. |
8151 | */ |
8152 | u32 *btf_kfunc_id_set_contains(const struct btf *btf, |
8153 | u32 kfunc_btf_id, |
8154 | const struct bpf_prog *prog) |
8155 | { |
8156 | enum bpf_prog_type prog_type = resolve_prog_type(prog); |
8157 | enum btf_kfunc_hook hook; |
8158 | u32 *kfunc_flags; |
8159 | |
8160 | kfunc_flags = __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id, prog);
8161 | if (kfunc_flags) |
8162 | return kfunc_flags; |
8163 | |
8164 | hook = bpf_prog_type_to_kfunc_hook(prog_type); |
8165 | return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id, prog); |
8166 | } |
8167 | |
8168 | u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id, |
8169 | const struct bpf_prog *prog) |
8170 | { |
8171 | return __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id, prog);
8172 | } |
8173 | |
8174 | static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook, |
8175 | const struct btf_kfunc_id_set *kset) |
8176 | { |
8177 | struct btf *btf; |
8178 | int ret, i; |
8179 | |
8180 | btf = btf_get_module_btf(kset->owner);
8181 | if (!btf) |
8182 | return check_btf_kconfigs(kset->owner, "kfunc");
8183 | if (IS_ERR(btf))
8184 | return PTR_ERR(btf);
8185 | |
8186 | for (i = 0; i < kset->set->cnt; i++) { |
8187 | ret = btf_check_kfunc_protos(btf, kset->set->pairs[i].id,
8188 | kset->set->pairs[i].flags);
8189 | if (ret) |
8190 | goto err_out; |
8191 | } |
8192 | |
8193 | ret = btf_populate_kfunc_set(btf, hook, kset); |
8194 | |
8195 | err_out: |
8196 | btf_put(btf); |
8197 | return ret; |
8198 | } |
8199 | |
8200 | /* This function must be invoked only from initcalls/module init functions */ |
8201 | int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, |
8202 | const struct btf_kfunc_id_set *kset) |
8203 | { |
8204 | enum btf_kfunc_hook hook; |
8205 | |
8206 | /* All kfuncs need to be tagged as such in BTF. |
8207 | * WARN() for initcall registrations that do not check errors. |
8208 | */ |
8209 | if (!(kset->set->flags & BTF_SET8_KFUNCS)) { |
8210 | WARN_ON(!kset->owner); |
8211 | return -EINVAL; |
8212 | } |
8213 | |
8214 | hook = bpf_prog_type_to_kfunc_hook(prog_type); |
8215 | return __register_btf_kfunc_id_set(hook, kset); |
8216 | } |
8217 | EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set); |
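
/* Illustrative registration sketch (kfunc name and program type are
 * hypothetical). The BTF_KFUNCS_START/END macros mark the set with
 * BTF_SET8_KFUNCS, which the check above requires:
 *
 *	BTF_KFUNCS_START(example_kfunc_ids)
 *	BTF_ID_FLAGS(func, bpf_example_kfunc, KF_TRUSTED_ARGS)
 *	BTF_KFUNCS_END(example_kfunc_ids)
 *
 *	static const struct btf_kfunc_id_set example_kfunc_set = {
 *		.owner = THIS_MODULE,
 *		.set   = &example_kfunc_ids,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
 *						 &example_kfunc_set);
 *	}
 */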
8218 | |
8219 | /* This function must be invoked only from initcalls/module init functions */ |
8220 | int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset) |
8221 | { |
8222 | return __register_btf_kfunc_id_set(BTF_KFUNC_HOOK_FMODRET, kset);
8223 | } |
8224 | EXPORT_SYMBOL_GPL(register_btf_fmodret_id_set); |
8225 | |
8226 | s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id) |
8227 | { |
8228 | struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab; |
8229 | struct btf_id_dtor_kfunc *dtor; |
8230 | |
8231 | if (!tab) |
8232 | return -ENOENT; |
8233 | /* Even though the size of tab->dtors[0] is > sizeof(u32), we only need |
8234 | * to compare the first u32 with btf_id, so we can reuse btf_id_cmp_func. |
8235 | */ |
8236 | BUILD_BUG_ON(offsetof(struct btf_id_dtor_kfunc, btf_id) != 0); |
8237 | dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func);
8238 | if (!dtor) |
8239 | return -ENOENT; |
8240 | return dtor->kfunc_btf_id; |
8241 | } |
8242 | |
8243 | static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt) |
8244 | { |
8245 | const struct btf_type *dtor_func, *dtor_func_proto, *t; |
8246 | const struct btf_param *args; |
8247 | s32 dtor_btf_id; |
8248 | u32 nr_args, i; |
8249 | |
8250 | for (i = 0; i < cnt; i++) { |
8251 | dtor_btf_id = dtors[i].kfunc_btf_id; |
8252 | |
8253 | dtor_func = btf_type_by_id(btf, dtor_btf_id); |
8254 | if (!dtor_func || !btf_type_is_func(dtor_func))
8255 | return -EINVAL; |
8256 | |
8257 | dtor_func_proto = btf_type_by_id(btf, dtor_func->type); |
8258 | if (!dtor_func_proto || !btf_type_is_func_proto(dtor_func_proto))
8259 | return -EINVAL; |
8260 | |
8261 | /* Make sure the prototype of the destructor kfunc is 'void func(type *)' */ |
8262 | t = btf_type_by_id(btf, dtor_func_proto->type); |
8263 | if (!t || !btf_type_is_void(t)) |
8264 | return -EINVAL; |
8265 | |
8266 | nr_args = btf_type_vlen(dtor_func_proto);
8267 | if (nr_args != 1) |
8268 | return -EINVAL; |
8269 | args = btf_params(dtor_func_proto);
8270 | t = btf_type_by_id(btf, args[0].type); |
8271 | /* Allow any pointer type, as the width on targets Linux supports
8272 | * will be the same for all pointer types (i.e. sizeof(void *))
8273 | */ |
8274 | if (!t || !btf_type_is_ptr(t)) |
8275 | return -EINVAL; |
8276 | } |
8277 | return 0; |
8278 | } |
8279 | |
8280 | /* This function must be invoked only from initcalls/module init functions */ |
8281 | int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt, |
8282 | struct module *owner) |
8283 | { |
8284 | struct btf_id_dtor_kfunc_tab *tab; |
8285 | struct btf *btf; |
8286 | u32 tab_cnt; |
8287 | int ret; |
8288 | |
8289 | btf = btf_get_module_btf(owner);
8290 | if (!btf) |
8291 | return check_btf_kconfigs(owner, "dtor kfuncs");
8292 | if (IS_ERR(btf))
8293 | return PTR_ERR(btf);
8294 | |
8295 | if (add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) { |
8296 | pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
8297 | ret = -E2BIG; |
8298 | goto end; |
8299 | } |
8300 | |
8301 | /* Ensure that the prototype of dtor kfuncs being registered is sane */ |
8302 | ret = btf_check_dtor_kfuncs(btf, dtors, add_cnt);
8303 | if (ret < 0) |
8304 | goto end; |
8305 | |
8306 | tab = btf->dtor_kfunc_tab; |
8307 | /* Only one call allowed for modules */ |
8308 | if (WARN_ON_ONCE(tab && btf_is_module(btf))) { |
8309 | ret = -EINVAL; |
8310 | goto end; |
8311 | } |
8312 | |
8313 | tab_cnt = tab ? tab->cnt : 0; |
8314 | if (tab_cnt > U32_MAX - add_cnt) { |
8315 | ret = -EOVERFLOW; |
8316 | goto end; |
8317 | } |
8318 | if (tab_cnt + add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) { |
8319 | pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
8320 | ret = -E2BIG; |
8321 | goto end; |
8322 | } |
8323 | |
8324 | tab = krealloc(btf->dtor_kfunc_tab,
8325 | offsetof(struct btf_id_dtor_kfunc_tab, dtors[tab_cnt + add_cnt]), |
8326 | GFP_KERNEL | __GFP_NOWARN); |
8327 | if (!tab) { |
8328 | ret = -ENOMEM; |
8329 | goto end; |
8330 | } |
8331 | |
8332 | if (!btf->dtor_kfunc_tab) |
8333 | tab->cnt = 0; |
8334 | btf->dtor_kfunc_tab = tab; |
8335 | |
8336 | memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0])); |
8337 | tab->cnt += add_cnt; |
8338 | |
8339 | sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);
8340 | |
8341 | end: |
8342 | if (ret) |
8343 | btf_free_dtor_kfunc_tab(btf); |
8344 | btf_put(btf); |
8345 | return ret; |
8346 | } |
8347 | EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs); |
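
/* Illustrative registration sketch (type and kfunc names are
 * hypothetical). The destructor must have a 'void dtor(type *)'
 * prototype, as checked by btf_check_dtor_kfuncs() above:
 *
 *	BTF_ID_LIST(example_dtor_ids)
 *	BTF_ID(struct, example_obj)
 *	BTF_ID(func, bpf_example_obj_release)
 *
 *	static const struct btf_id_dtor_kfunc example_dtors[] = {
 *		{
 *			.btf_id	      = example_dtor_ids[0],
 *			.kfunc_btf_id = example_dtor_ids[1],
 *		},
 *	};
 *
 *	return register_btf_id_dtor_kfuncs(example_dtors,
 *					   ARRAY_SIZE(example_dtors),
 *					   THIS_MODULE);
 */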
8348 | |
8349 | #define MAX_TYPES_ARE_COMPAT_DEPTH 2 |
8350 | |
8351 | /* Check local and target types for compatibility. This check is used for |
8352 | * type-based CO-RE relocations and follow slightly different rules than |
8353 | * field-based relocations. This function assumes that root types were already |
8354 | * checked for name match. Beyond that initial root-level name check, names |
8355 | * are completely ignored. Compatibility rules are as follows: |
8356 | * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but |
8357 | * kind should match for local and target types (i.e., STRUCT is not |
8358 | * compatible with UNION); |
8359 | * - for ENUMs/ENUM64s, the size is ignored; |
8360 | * - for INT, size and signedness are ignored; |
8361 | * - for ARRAY, dimensionality is ignored, element types are checked for |
8362 | * compatibility recursively; |
8363 | * - CONST/VOLATILE/RESTRICT modifiers are ignored; |
8364 | * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
8365 | * - FUNC_PROTOs are compatible if they have compatible signature: same |
8366 | * number of input args and compatible return and argument types. |
8367 | * These rules are not set in stone and probably will be adjusted as we get |
8368 | * more experience with using BPF CO-RE relocations. |
8369 | */ |
8370 | int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, |
8371 | const struct btf *targ_btf, __u32 targ_id) |
8372 | { |
8373 | return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, |
8374 | MAX_TYPES_ARE_COMPAT_DEPTH); |
8375 | } |
8376 | |
8377 | #define MAX_TYPES_MATCH_DEPTH 2 |
8378 | |
8379 | int bpf_core_types_match(const struct btf *local_btf, u32 local_id, |
8380 | const struct btf *targ_btf, u32 targ_id) |
8381 | { |
	return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false,
8383 | MAX_TYPES_MATCH_DEPTH); |
8384 | } |
8385 | |
8386 | static bool bpf_core_is_flavor_sep(const char *s) |
8387 | { |
8388 | /* check X___Y name pattern, where X and Y are not underscores */ |
8389 | return s[0] != '_' && /* X */ |
8390 | s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */ |
8391 | s[4] != '_'; /* Y */ |
8392 | } |
8393 | |
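/* Return the length of the "essential" part of a possibly flavored type name,
 * i.e. everything before the X___Y flavor separator. For example,
 * "task_struct___2" has an essential name length of 11 ("task_struct");
 * a name without the separator is returned in full.
 */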
8394 | size_t bpf_core_essential_name_len(const char *name) |
8395 | { |
8396 | size_t n = strlen(name); |
8397 | int i; |
8398 | |
8399 | for (i = n - 5; i >= 0; i--) { |
		if (bpf_core_is_flavor_sep(name + i))
8401 | return i + 1; |
8402 | } |
8403 | return n; |
8404 | } |
8405 | |
8406 | static void bpf_free_cands(struct bpf_cand_cache *cands) |
8407 | { |
8408 | if (!cands->cnt) |
8409 | /* empty candidate array was allocated on stack */ |
8410 | return; |
	kfree(cands);
8412 | } |
8413 | |
8414 | static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands) |
8415 | { |
	kfree(cands->name);
	kfree(cands);
8418 | } |
8419 | |
8420 | #define VMLINUX_CAND_CACHE_SIZE 31 |
8421 | static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE]; |
8422 | |
8423 | #define MODULE_CAND_CACHE_SIZE 31 |
8424 | static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE]; |
8425 | |
8426 | static void __print_cand_cache(struct bpf_verifier_log *log, |
8427 | struct bpf_cand_cache **cache, |
8428 | int cache_size) |
8429 | { |
8430 | struct bpf_cand_cache *cc; |
8431 | int i, j; |
8432 | |
8433 | for (i = 0; i < cache_size; i++) { |
8434 | cc = cache[i]; |
8435 | if (!cc) |
8436 | continue; |
		bpf_log(log, "[%d]%s(", i, cc->name);
		for (j = 0; j < cc->cnt; j++) {
			bpf_log(log, "%d", cc->cands[j].id);
			if (j < cc->cnt - 1)
				bpf_log(log, " ");
		}
		bpf_log(log, "), ");
8444 | } |
8445 | } |
8446 | |
8447 | static void print_cand_cache(struct bpf_verifier_log *log) |
8448 | { |
8449 | mutex_lock(&cand_cache_mutex); |
	bpf_log(log, "vmlinux_cand_cache:");
	__print_cand_cache(log, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
	bpf_log(log, "\nmodule_cand_cache:");
	__print_cand_cache(log, module_cand_cache, MODULE_CAND_CACHE_SIZE);
	bpf_log(log, "\n");
	mutex_unlock(&cand_cache_mutex);
8456 | } |
8457 | |
8458 | static u32 hash_cands(struct bpf_cand_cache *cands) |
8459 | { |
	return jhash(cands->name, cands->name_len, 0);
8461 | } |
8462 | |
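/* Look up a previously cached candidate list by the hash of its essential
 * name. Each bucket holds at most one entry (no chaining), so a name mismatch
 * in the bucket simply means a cache miss.
 */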
8463 | static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands, |
8464 | struct bpf_cand_cache **cache, |
8465 | int cache_size) |
8466 | { |
8467 | struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size]; |
8468 | |
8469 | if (cc && cc->name_len == cands->name_len && |
8470 | !strncmp(cc->name, cands->name, cands->name_len)) |
8471 | return cc; |
8472 | return NULL; |
8473 | } |
8474 | |
8475 | static size_t sizeof_cands(int cnt) |
8476 | { |
8477 | return offsetof(struct bpf_cand_cache, cands[cnt]); |
8478 | } |
8479 | |
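/* Copy a candidate list into its cache bucket, replacing any existing entry.
 * Both the list and its name are duplicated into kernel memory, and the
 * original 'cands' is always released via bpf_free_cands().
 */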
8480 | static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands, |
8481 | struct bpf_cand_cache **cache, |
8482 | int cache_size) |
8483 | { |
8484 | struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands; |
8485 | |
8486 | if (*cc) { |
		bpf_free_cands_from_cache(*cc);
8488 | *cc = NULL; |
8489 | } |
	new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL);
	if (!new_cands) {
		bpf_free_cands(cands);
		return ERR_PTR(-ENOMEM);
	}
	/* strdup the name, since it will stay in the cache.
	 * The cands->name points to strings in the prog's BTF and the prog can be unloaded.
	 */
	new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL);
	bpf_free_cands(cands);
	if (!new_cands->name) {
		kfree(new_cands);
		return ERR_PTR(-ENOMEM);
8503 | } |
8504 | *cc = new_cands; |
8505 | return new_cands; |
8506 | } |
8507 | |
8508 | #ifdef CONFIG_DEBUG_INFO_BTF_MODULES |
8509 | static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache, |
8510 | int cache_size) |
8511 | { |
8512 | struct bpf_cand_cache *cc; |
8513 | int i, j; |
8514 | |
8515 | for (i = 0; i < cache_size; i++) { |
8516 | cc = cache[i]; |
8517 | if (!cc) |
8518 | continue; |
8519 | if (!btf) { |
			/* when a new module is loaded purge all of module_cand_cache,
			 * since the new module might have candidates with names
			 * that match cached cands.
			 */
8524 | bpf_free_cands_from_cache(cc); |
8525 | cache[i] = NULL; |
8526 | continue; |
8527 | } |
		/* when a module is unloaded purge cache entries
		 * that match the module's btf
		 */
8531 | for (j = 0; j < cc->cnt; j++) |
8532 | if (cc->cands[j].btf == btf) { |
8533 | bpf_free_cands_from_cache(cc); |
8534 | cache[i] = NULL; |
8535 | break; |
8536 | } |
8537 | } |
8538 | |
8539 | } |
8540 | |
8541 | static void purge_cand_cache(struct btf *btf) |
8542 | { |
8543 | mutex_lock(&cand_cache_mutex); |
8544 | __purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE); |
8545 | mutex_unlock(&cand_cache_mutex); |
8546 | } |
8547 | #endif |
8548 | |
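/* Scan targ_btf starting from targ_start_id and append every type whose kind
 * and essential name match 'cands'. The candidate array is reallocated for
 * each match; on allocation failure the old candidate list is freed and
 * ERR_PTR(-ENOMEM) is returned.
 */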
8549 | static struct bpf_cand_cache * |
8550 | bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf, |
8551 | int targ_start_id) |
8552 | { |
8553 | struct bpf_cand_cache *new_cands; |
8554 | const struct btf_type *t; |
8555 | const char *targ_name; |
8556 | size_t targ_essent_len; |
8557 | int n, i; |
8558 | |
	n = btf_nr_types(targ_btf);
8560 | for (i = targ_start_id; i < n; i++) { |
8561 | t = btf_type_by_id(targ_btf, i); |
8562 | if (btf_kind(t) != cands->kind) |
8563 | continue; |
8564 | |
		targ_name = btf_name_by_offset(targ_btf, t->name_off);
8566 | if (!targ_name) |
8567 | continue; |
8568 | |
		/* The resched point is before the strncmp so that a search for a
		 * non-existing name still gets a chance to schedule().
		 */
8572 | cond_resched(); |
8573 | |
8574 | if (strncmp(cands->name, targ_name, cands->name_len) != 0) |
8575 | continue; |
8576 | |
		targ_essent_len = bpf_core_essential_name_len(targ_name);
8578 | if (targ_essent_len != cands->name_len) |
8579 | continue; |
8580 | |
8581 | /* most of the time there is only one candidate for a given kind+name pair */ |
		new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL);
		if (!new_cands) {
			bpf_free_cands(cands);
			return ERR_PTR(-ENOMEM);
8586 | } |
8587 | |
8588 | memcpy(new_cands, cands, sizeof_cands(cands->cnt)); |
8589 | bpf_free_cands(cands); |
8590 | cands = new_cands; |
8591 | cands->cands[cands->cnt].btf = targ_btf; |
8592 | cands->cands[cands->cnt].id = i; |
8593 | cands->cnt++; |
8594 | } |
8595 | return cands; |
8596 | } |
8597 | |
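/* Find candidate target types for a CO-RE relocation of the given local type.
 * vmlinux BTF is searched first and the result (even an empty one) is cached;
 * module BTFs are scanned only when vmlinux has no candidate. The caller,
 * bpf_core_apply(), holds cand_cache_mutex across this call.
 */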
8598 | static struct bpf_cand_cache * |
8599 | bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id) |
8600 | { |
8601 | struct bpf_cand_cache *cands, *cc, local_cand = {}; |
8602 | const struct btf *local_btf = ctx->btf; |
8603 | const struct btf_type *local_type; |
8604 | const struct btf *main_btf; |
8605 | size_t local_essent_len; |
8606 | struct btf *mod_btf; |
8607 | const char *name; |
8608 | int id; |
8609 | |
8610 | main_btf = bpf_get_btf_vmlinux(); |
	if (IS_ERR(main_btf))
		return ERR_CAST(main_btf);
	if (!main_btf)
		return ERR_PTR(-EINVAL);
8615 | |
8616 | local_type = btf_type_by_id(local_btf, local_type_id); |
8617 | if (!local_type) |
		return ERR_PTR(-EINVAL);

	name = btf_name_by_offset(local_btf, local_type->name_off);
	if (str_is_empty(name))
		return ERR_PTR(-EINVAL);
8623 | local_essent_len = bpf_core_essential_name_len(name); |
8624 | |
8625 | cands = &local_cand; |
8626 | cands->name = name; |
	cands->kind = btf_kind(local_type);
8628 | cands->name_len = local_essent_len; |
8629 | |
	cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
8631 | /* cands is a pointer to stack here */ |
8632 | if (cc) { |
8633 | if (cc->cnt) |
8634 | return cc; |
8635 | goto check_modules; |
8636 | } |
8637 | |
8638 | /* Attempt to find target candidates in vmlinux BTF first */ |
	cands = bpf_core_add_cands(cands, main_btf, 1);
	if (IS_ERR(cands))
		return ERR_CAST(cands);
8642 | |
8643 | /* cands is a pointer to kmalloced memory here if cands->cnt > 0 */ |
8644 | |
8645 | /* populate cache even when cands->cnt == 0 */ |
	cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
	if (IS_ERR(cc))
		return ERR_CAST(cc);
8649 | |
8650 | /* if vmlinux BTF has any candidate, don't go for module BTFs */ |
8651 | if (cc->cnt) |
8652 | return cc; |
8653 | |
8654 | check_modules: |
8655 | /* cands is a pointer to stack here and cands->cnt == 0 */ |
	cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
8657 | if (cc) |
8658 | /* if cache has it return it even if cc->cnt == 0 */ |
8659 | return cc; |
8660 | |
8661 | /* If candidate is not found in vmlinux's BTF then search in module's BTFs */ |
	spin_lock_bh(&btf_idr_lock);
	idr_for_each_entry(&btf_idr, mod_btf, id) {
		if (!btf_is_module(mod_btf))
			continue;
		/* linear search could be slow hence unlock/lock
		 * the IDR to avoid holding it for too long
		 */
		btf_get(mod_btf);
		spin_unlock_bh(&btf_idr_lock);
		cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
		btf_put(mod_btf);
		if (IS_ERR(cands))
			return ERR_CAST(cands);
		spin_lock_bh(&btf_idr_lock);
8676 | } |
	spin_unlock_bh(&btf_idr_lock);
	/* cands is a pointer to kmalloced memory here if cands->cnt > 0
	 * or pointer to stack if cands->cnt == 0.
	 * Copy it into the cache even when cands->cnt == 0 and
	 * return the result.
	 */
	return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
8684 | } |
8685 | |
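/* Resolve and apply a single CO-RE relocation against kernel/module BTF.
 * 'insn' points to the instruction to be patched in place; 'relo_idx' is only
 * used to identify the relocation in log messages.
 */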
8686 | int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, |
8687 | int relo_idx, void *insn) |
8688 | { |
8689 | bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL; |
8690 | struct bpf_core_cand_list cands = {}; |
8691 | struct bpf_core_relo_res targ_res; |
8692 | struct bpf_core_spec *specs; |
8693 | int err; |
8694 | |
	/* ~4k of temp memory is necessary to convert LLVM spec like "0:1:0:5"
	 * into arrays of btf_ids of struct fields and array indices.
	 */
	specs = kcalloc(3, sizeof(*specs), GFP_KERNEL);
8699 | if (!specs) |
8700 | return -ENOMEM; |
8701 | |
8702 | if (need_cands) { |
8703 | struct bpf_cand_cache *cc; |
8704 | int i; |
8705 | |
8706 | mutex_lock(&cand_cache_mutex); |
		cc = bpf_core_find_cands(ctx, relo->type_id);
		if (IS_ERR(cc)) {
			bpf_log(ctx->log, "target candidate search failed for %d\n",
				relo->type_id);
			err = PTR_ERR(cc);
8712 | goto out; |
8713 | } |
8714 | if (cc->cnt) { |
			cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL);
8716 | if (!cands.cands) { |
8717 | err = -ENOMEM; |
8718 | goto out; |
8719 | } |
8720 | } |
8721 | for (i = 0; i < cc->cnt; i++) { |
			bpf_log(ctx->log,
				"CO-RE relocating %s %s: found target candidate [%d]\n",
8724 | btf_kind_str[cc->kind], cc->name, cc->cands[i].id); |
8725 | cands.cands[i].btf = cc->cands[i].btf; |
8726 | cands.cands[i].id = cc->cands[i].id; |
8727 | } |
8728 | cands.len = cc->cnt; |
8729 | /* cand_cache_mutex needs to span the cache lookup and |
8730 | * copy of btf pointer into bpf_core_cand_list, |
8731 | * since module can be unloaded while bpf_core_calc_relo_insn |
8732 | * is working with module's btf. |
8733 | */ |
8734 | } |
8735 | |
	err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs,
				      &targ_res);
8738 | if (err) |
8739 | goto out; |
8740 | |
	err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx,
				  &targ_res);
8743 | |
8744 | out: |
	kfree(specs);
	if (need_cands) {
		kfree(cands.cands);
		mutex_unlock(&cand_cache_mutex);
		if (ctx->log->level & BPF_LOG_LEVEL2)
			print_cand_cache(ctx->log);
8751 | } |
8752 | return err; |
8753 | } |
8754 | |
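/* Return true if the pointer field 'field_name' of the type tracked in 'reg'
 * also appears, with a matching pointee type, in the companion
 * "<type_name><suffix>" struct looked up by name and kind in the same BTF.
 * Callers use this to treat such nested pointer fields as trusted.
 */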
8755 | bool btf_nested_type_is_trusted(struct bpf_verifier_log *log, |
8756 | const struct bpf_reg_state *reg, |
8757 | const char *field_name, u32 btf_id, const char *suffix) |
8758 | { |
8759 | struct btf *btf = reg->btf; |
8760 | const struct btf_type *walk_type, *safe_type; |
8761 | const char *tname; |
8762 | char safe_tname[64]; |
8763 | long ret, safe_id; |
8764 | const struct btf_member *member; |
8765 | u32 i; |
8766 | |
8767 | walk_type = btf_type_by_id(btf, reg->btf_id); |
8768 | if (!walk_type) |
8769 | return false; |
8770 | |
	tname = btf_name_by_offset(btf, walk_type->name_off);

	ret = snprintf(safe_tname, sizeof(safe_tname), "%s%s", tname, suffix);
	if (ret >= sizeof(safe_tname))
		return false;

	safe_id = btf_find_by_name_kind(btf, safe_tname, BTF_INFO_KIND(walk_type->info));
8778 | if (safe_id < 0) |
8779 | return false; |
8780 | |
8781 | safe_type = btf_type_by_id(btf, safe_id); |
8782 | if (!safe_type) |
8783 | return false; |
8784 | |
8785 | for_each_member(i, safe_type, member) { |
		const char *m_name = __btf_name_by_offset(btf, member->name_off);
		const struct btf_type *mtype = btf_type_by_id(btf, member->type);
		u32 id;

		if (!btf_type_is_ptr(mtype))
			continue;

		btf_type_skip_modifiers(btf, mtype->type, &id);
8794 | /* If we match on both type and name, the field is considered trusted. */ |
8795 | if (btf_id == id && !strcmp(field_name, m_name)) |
8796 | return true; |
8797 | } |
8798 | |
8799 | return false; |
8800 | } |
8801 | |
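/* Return true if one of the two named types is the other's name with the
 * NOCAST_ALIAS_SUFFIX ("___init") appended, e.g. nf_conn and nf_conn___init.
 * Two identical names are not aliases of each other, they are the same type.
 */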
8802 | bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log, |
8803 | const struct btf *reg_btf, u32 reg_id, |
8804 | const struct btf *arg_btf, u32 arg_id) |
8805 | { |
8806 | const char *reg_name, *arg_name, *search_needle; |
8807 | const struct btf_type *reg_type, *arg_type; |
8808 | int reg_len, arg_len, cmp_len; |
8809 | size_t pattern_len = sizeof(NOCAST_ALIAS_SUFFIX) - sizeof(char); |
8810 | |
8811 | reg_type = btf_type_by_id(reg_btf, reg_id); |
8812 | if (!reg_type) |
8813 | return false; |
8814 | |
8815 | arg_type = btf_type_by_id(arg_btf, arg_id); |
8816 | if (!arg_type) |
8817 | return false; |
8818 | |
	reg_name = btf_name_by_offset(reg_btf, reg_type->name_off);
	arg_name = btf_name_by_offset(arg_btf, arg_type->name_off);
8821 | |
8822 | reg_len = strlen(reg_name); |
8823 | arg_len = strlen(arg_name); |
8824 | |
8825 | /* Exactly one of the two type names may be suffixed with ___init, so |
8826 | * if the strings are the same size, they can't possibly be no-cast |
8827 | * aliases of one another. If you have two of the same type names, e.g. |
8828 | * they're both nf_conn___init, it would be improper to return true |
8829 | * because they are _not_ no-cast aliases, they are the same type. |
8830 | */ |
8831 | if (reg_len == arg_len) |
8832 | return false; |
8833 | |
	/* One of the two names must be the other name suffixed with ___init. */
8835 | if ((reg_len != arg_len + pattern_len) && |
8836 | (arg_len != reg_len + pattern_len)) |
8837 | return false; |
8838 | |
8839 | if (reg_len < arg_len) { |
8840 | search_needle = strstr(arg_name, NOCAST_ALIAS_SUFFIX); |
8841 | cmp_len = reg_len; |
8842 | } else { |
8843 | search_needle = strstr(reg_name, NOCAST_ALIAS_SUFFIX); |
8844 | cmp_len = arg_len; |
8845 | } |
8846 | |
8847 | if (!search_needle) |
8848 | return false; |
8849 | |
8850 | /* ___init suffix must come at the end of the name */ |
8851 | if (*(search_needle + pattern_len) != '\0') |
8852 | return false; |
8853 | |
8854 | return !strncmp(reg_name, arg_name, cmp_len); |
8855 | } |
8856 | |
8857 | #ifdef CONFIG_BPF_JIT |
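/* Record a struct_ops description in btf->struct_ops_tab. The table starts
 * with room for 4 entries and doubles its capacity via krealloc when full;
 * registering the same st_ops twice returns -EEXIST.
 */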
8858 | static int |
8859 | btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops, |
8860 | struct bpf_verifier_log *log) |
8861 | { |
8862 | struct btf_struct_ops_tab *tab, *new_tab; |
8863 | int i, err; |
8864 | |
8865 | tab = btf->struct_ops_tab; |
8866 | if (!tab) { |
8867 | tab = kzalloc(offsetof(struct btf_struct_ops_tab, ops[4]), |
8868 | GFP_KERNEL); |
8869 | if (!tab) |
8870 | return -ENOMEM; |
8871 | tab->capacity = 4; |
8872 | btf->struct_ops_tab = tab; |
8873 | } |
8874 | |
8875 | for (i = 0; i < tab->cnt; i++) |
8876 | if (tab->ops[i].st_ops == st_ops) |
8877 | return -EEXIST; |
8878 | |
8879 | if (tab->cnt == tab->capacity) { |
		new_tab = krealloc(tab,
8881 | offsetof(struct btf_struct_ops_tab, |
8882 | ops[tab->capacity * 2]), |
8883 | GFP_KERNEL); |
8884 | if (!new_tab) |
8885 | return -ENOMEM; |
8886 | tab = new_tab; |
8887 | tab->capacity *= 2; |
8888 | btf->struct_ops_tab = tab; |
8889 | } |
8890 | |
8891 | tab->ops[btf->struct_ops_tab->cnt].st_ops = st_ops; |
8892 | |
	err = bpf_struct_ops_desc_init(&tab->ops[btf->struct_ops_tab->cnt], btf, log);
8894 | if (err) |
8895 | return err; |
8896 | |
8897 | btf->struct_ops_tab->cnt++; |
8898 | |
8899 | return 0; |
8900 | } |
8901 | |
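/* Look up a registered struct_ops description by the BTF id of its map value
 * type. bpf_struct_ops_find() below performs the same lookup keyed by the BTF
 * id of the struct_ops type itself.
 */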
8902 | const struct bpf_struct_ops_desc * |
8903 | bpf_struct_ops_find_value(struct btf *btf, u32 value_id) |
8904 | { |
8905 | const struct bpf_struct_ops_desc *st_ops_list; |
8906 | unsigned int i; |
8907 | u32 cnt; |
8908 | |
8909 | if (!value_id) |
8910 | return NULL; |
8911 | if (!btf->struct_ops_tab) |
8912 | return NULL; |
8913 | |
8914 | cnt = btf->struct_ops_tab->cnt; |
8915 | st_ops_list = btf->struct_ops_tab->ops; |
8916 | for (i = 0; i < cnt; i++) { |
8917 | if (st_ops_list[i].value_id == value_id) |
8918 | return &st_ops_list[i]; |
8919 | } |
8920 | |
8921 | return NULL; |
8922 | } |
8923 | |
8924 | const struct bpf_struct_ops_desc * |
8925 | bpf_struct_ops_find(struct btf *btf, u32 type_id) |
8926 | { |
8927 | const struct bpf_struct_ops_desc *st_ops_list; |
8928 | unsigned int i; |
8929 | u32 cnt; |
8930 | |
8931 | if (!type_id) |
8932 | return NULL; |
8933 | if (!btf->struct_ops_tab) |
8934 | return NULL; |
8935 | |
8936 | cnt = btf->struct_ops_tab->cnt; |
8937 | st_ops_list = btf->struct_ops_tab->ops; |
8938 | for (i = 0; i < cnt; i++) { |
8939 | if (st_ops_list[i].type_id == type_id) |
8940 | return &st_ops_list[i]; |
8941 | } |
8942 | |
8943 | return NULL; |
8944 | } |
8945 | |
8946 | int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops) |
8947 | { |
8948 | struct bpf_verifier_log *log; |
8949 | struct btf *btf; |
8950 | int err = 0; |
8951 | |
	btf = btf_get_module_btf(st_ops->owner);
	if (!btf)
		return check_btf_kconfigs(st_ops->owner, "struct_ops");
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	log = kzalloc(sizeof(*log), GFP_KERNEL | __GFP_NOWARN);
8959 | if (!log) { |
8960 | err = -ENOMEM; |
8961 | goto errout; |
8962 | } |
8963 | |
8964 | log->level = BPF_LOG_KERNEL; |
8965 | |
8966 | err = btf_add_struct_ops(btf, st_ops, log); |
8967 | |
8968 | errout: |
	kfree(log);
8970 | btf_put(btf); |
8971 | |
8972 | return err; |
8973 | } |
8974 | EXPORT_SYMBOL_GPL(__register_bpf_struct_ops); |
8975 | #endif |
8976 | |
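/* Return true if the BTF parameter name ends with the given suffix, e.g. a
 * kfunc argument named "foo__sz" (hypothetical) would match the "__sz" suffix
 * used by size annotations.
 */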
8977 | bool btf_param_match_suffix(const struct btf *btf, |
8978 | const struct btf_param *arg, |
8979 | const char *suffix) |
8980 | { |
8981 | int suffix_len = strlen(suffix), len; |
8982 | const char *param_name; |
8983 | |
8984 | /* In the future, this can be ported to use BTF tagging */ |
	param_name = btf_name_by_offset(btf, arg->name_off);
	if (str_is_empty(param_name))
8987 | return false; |
8988 | len = strlen(param_name); |
8989 | if (len <= suffix_len) |
8990 | return false; |
8991 | param_name += len - suffix_len; |
8992 | return !strncmp(param_name, suffix, suffix_len); |
8993 | } |
8994 | |