1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com |
3 | */ |
4 | #include <linux/bpf.h> |
5 | #include <linux/btf.h> |
6 | #include <linux/bpf-cgroup.h> |
7 | #include <linux/cgroup.h> |
8 | #include <linux/rcupdate.h> |
9 | #include <linux/random.h> |
10 | #include <linux/smp.h> |
11 | #include <linux/topology.h> |
12 | #include <linux/ktime.h> |
13 | #include <linux/sched.h> |
14 | #include <linux/uidgid.h> |
15 | #include <linux/filter.h> |
16 | #include <linux/ctype.h> |
17 | #include <linux/jiffies.h> |
18 | #include <linux/pid_namespace.h> |
19 | #include <linux/poison.h> |
20 | #include <linux/proc_ns.h> |
21 | #include <linux/sched/task.h> |
22 | #include <linux/security.h> |
23 | #include <linux/btf_ids.h> |
24 | #include <linux/bpf_mem_alloc.h> |
25 | #include <linux/kasan.h> |
26 | |
27 | #include "../../lib/kstrtox.h" |
28 | |
/* If a kernel subsystem allows eBPF programs to call this function, it should
 * return bpf_map_lookup_elem_proto from its own verifier_ops->get_func_proto()
 * callback, so that the verifier can properly check the arguments.
 *
 * Different map implementations rely on rcu in the map methods
 * lookup/update/delete, therefore eBPF programs must run under an rcu lock
 * if the program is allowed to access maps, so check rcu_read_lock_held() or
 * rcu_read_lock_trace_held() in all three functions.
37 | */ |
38 | BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key) |
39 | { |
40 | WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && |
41 | !rcu_read_lock_bh_held()); |
42 | return (unsigned long) map->ops->map_lookup_elem(map, key); |
43 | } |
44 | |
45 | const struct bpf_func_proto bpf_map_lookup_elem_proto = { |
46 | .func = bpf_map_lookup_elem, |
47 | .gpl_only = false, |
48 | .pkt_access = true, |
49 | .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, |
50 | .arg1_type = ARG_CONST_MAP_PTR, |
51 | .arg2_type = ARG_PTR_TO_MAP_KEY, |
52 | }; |
53 | |
54 | BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key, |
55 | void *, value, u64, flags) |
56 | { |
57 | WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && |
58 | !rcu_read_lock_bh_held()); |
59 | return map->ops->map_update_elem(map, key, value, flags); |
60 | } |
61 | |
62 | const struct bpf_func_proto bpf_map_update_elem_proto = { |
63 | .func = bpf_map_update_elem, |
64 | .gpl_only = false, |
65 | .pkt_access = true, |
66 | .ret_type = RET_INTEGER, |
67 | .arg1_type = ARG_CONST_MAP_PTR, |
68 | .arg2_type = ARG_PTR_TO_MAP_KEY, |
69 | .arg3_type = ARG_PTR_TO_MAP_VALUE, |
70 | .arg4_type = ARG_ANYTHING, |
71 | }; |
72 | |
73 | BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key) |
74 | { |
75 | WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && |
76 | !rcu_read_lock_bh_held()); |
77 | return map->ops->map_delete_elem(map, key); |
78 | } |
79 | |
80 | const struct bpf_func_proto bpf_map_delete_elem_proto = { |
81 | .func = bpf_map_delete_elem, |
82 | .gpl_only = false, |
83 | .pkt_access = true, |
84 | .ret_type = RET_INTEGER, |
85 | .arg1_type = ARG_CONST_MAP_PTR, |
86 | .arg2_type = ARG_PTR_TO_MAP_KEY, |
87 | }; |
88 | |
89 | BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags) |
90 | { |
91 | return map->ops->map_push_elem(map, value, flags); |
92 | } |
93 | |
94 | const struct bpf_func_proto bpf_map_push_elem_proto = { |
95 | .func = bpf_map_push_elem, |
96 | .gpl_only = false, |
97 | .pkt_access = true, |
98 | .ret_type = RET_INTEGER, |
99 | .arg1_type = ARG_CONST_MAP_PTR, |
100 | .arg2_type = ARG_PTR_TO_MAP_VALUE, |
101 | .arg3_type = ARG_ANYTHING, |
102 | }; |
103 | |
104 | BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value) |
105 | { |
106 | return map->ops->map_pop_elem(map, value); |
107 | } |
108 | |
109 | const struct bpf_func_proto bpf_map_pop_elem_proto = { |
110 | .func = bpf_map_pop_elem, |
111 | .gpl_only = false, |
112 | .ret_type = RET_INTEGER, |
113 | .arg1_type = ARG_CONST_MAP_PTR, |
114 | .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT, |
115 | }; |
116 | |
117 | BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value) |
118 | { |
119 | return map->ops->map_peek_elem(map, value); |
120 | } |
121 | |
122 | const struct bpf_func_proto bpf_map_peek_elem_proto = { |
123 | .func = bpf_map_peek_elem, |
124 | .gpl_only = false, |
125 | .ret_type = RET_INTEGER, |
126 | .arg1_type = ARG_CONST_MAP_PTR, |
127 | .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT, |
128 | }; |
129 | |
130 | BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu) |
131 | { |
132 | WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); |
133 | return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu); |
134 | } |
135 | |
136 | const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = { |
137 | .func = bpf_map_lookup_percpu_elem, |
138 | .gpl_only = false, |
139 | .pkt_access = true, |
140 | .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, |
141 | .arg1_type = ARG_CONST_MAP_PTR, |
142 | .arg2_type = ARG_PTR_TO_MAP_KEY, |
143 | .arg3_type = ARG_ANYTHING, |
144 | }; |
145 | |
146 | const struct bpf_func_proto bpf_get_prandom_u32_proto = { |
147 | .func = bpf_user_rnd_u32, |
148 | .gpl_only = false, |
149 | .ret_type = RET_INTEGER, |
150 | }; |
151 | |
152 | BPF_CALL_0(bpf_get_smp_processor_id) |
153 | { |
154 | return smp_processor_id(); |
155 | } |
156 | |
157 | const struct bpf_func_proto bpf_get_smp_processor_id_proto = { |
158 | .func = bpf_get_smp_processor_id, |
159 | .gpl_only = false, |
160 | .ret_type = RET_INTEGER, |
161 | }; |
162 | |
163 | BPF_CALL_0(bpf_get_numa_node_id) |
164 | { |
165 | return numa_node_id(); |
166 | } |
167 | |
168 | const struct bpf_func_proto bpf_get_numa_node_id_proto = { |
169 | .func = bpf_get_numa_node_id, |
170 | .gpl_only = false, |
171 | .ret_type = RET_INTEGER, |
172 | }; |
173 | |
174 | BPF_CALL_0(bpf_ktime_get_ns) |
175 | { |
176 | /* NMI safe access to clock monotonic */ |
177 | return ktime_get_mono_fast_ns(); |
178 | } |
179 | |
180 | const struct bpf_func_proto bpf_ktime_get_ns_proto = { |
181 | .func = bpf_ktime_get_ns, |
182 | .gpl_only = false, |
183 | .ret_type = RET_INTEGER, |
184 | }; |
185 | |
186 | BPF_CALL_0(bpf_ktime_get_boot_ns) |
187 | { |
188 | /* NMI safe access to clock boottime */ |
189 | return ktime_get_boot_fast_ns(); |
190 | } |
191 | |
192 | const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = { |
193 | .func = bpf_ktime_get_boot_ns, |
194 | .gpl_only = false, |
195 | .ret_type = RET_INTEGER, |
196 | }; |
197 | |
198 | BPF_CALL_0(bpf_ktime_get_coarse_ns) |
199 | { |
200 | return ktime_get_coarse_ns(); |
201 | } |
202 | |
203 | const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = { |
204 | .func = bpf_ktime_get_coarse_ns, |
205 | .gpl_only = false, |
206 | .ret_type = RET_INTEGER, |
207 | }; |
208 | |
209 | BPF_CALL_0(bpf_ktime_get_tai_ns) |
210 | { |
211 | /* NMI safe access to clock tai */ |
212 | return ktime_get_tai_fast_ns(); |
213 | } |
214 | |
215 | const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = { |
216 | .func = bpf_ktime_get_tai_ns, |
217 | .gpl_only = false, |
218 | .ret_type = RET_INTEGER, |
219 | }; |
220 | |
221 | BPF_CALL_0(bpf_get_current_pid_tgid) |
222 | { |
223 | struct task_struct *task = current; |
224 | |
225 | if (unlikely(!task)) |
226 | return -EINVAL; |
227 | |
228 | return (u64) task->tgid << 32 | task->pid; |
229 | } |
230 | |
231 | const struct bpf_func_proto bpf_get_current_pid_tgid_proto = { |
232 | .func = bpf_get_current_pid_tgid, |
233 | .gpl_only = false, |
234 | .ret_type = RET_INTEGER, |
235 | }; |
236 | |
237 | BPF_CALL_0(bpf_get_current_uid_gid) |
238 | { |
239 | struct task_struct *task = current; |
240 | kuid_t uid; |
241 | kgid_t gid; |
242 | |
243 | if (unlikely(!task)) |
244 | return -EINVAL; |
245 | |
246 | current_uid_gid(&uid, &gid); |
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
249 | } |
250 | |
251 | const struct bpf_func_proto bpf_get_current_uid_gid_proto = { |
252 | .func = bpf_get_current_uid_gid, |
253 | .gpl_only = false, |
254 | .ret_type = RET_INTEGER, |
255 | }; |
256 | |
257 | BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size) |
258 | { |
259 | struct task_struct *task = current; |
260 | |
261 | if (unlikely(!task)) |
262 | goto err_clear; |
263 | |
264 | /* Verifier guarantees that size > 0 */ |
265 | strscpy_pad(buf, task->comm, size); |
266 | return 0; |
267 | err_clear: |
268 | memset(buf, 0, size); |
269 | return -EINVAL; |
270 | } |
271 | |
272 | const struct bpf_func_proto bpf_get_current_comm_proto = { |
273 | .func = bpf_get_current_comm, |
274 | .gpl_only = false, |
275 | .ret_type = RET_INTEGER, |
276 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, |
277 | .arg2_type = ARG_CONST_SIZE, |
278 | }; |
279 | |
280 | #if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK) |
281 | |
282 | static inline void __bpf_spin_lock(struct bpf_spin_lock *lock) |
283 | { |
284 | arch_spinlock_t *l = (void *)lock; |
285 | union { |
286 | __u32 val; |
287 | arch_spinlock_t lock; |
288 | } u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED }; |
289 | |
	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
291 | BUILD_BUG_ON(sizeof(*l) != sizeof(__u32)); |
292 | BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32)); |
293 | preempt_disable(); |
294 | arch_spin_lock(l); |
295 | } |
296 | |
297 | static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) |
298 | { |
299 | arch_spinlock_t *l = (void *)lock; |
300 | |
301 | arch_spin_unlock(l); |
302 | preempt_enable(); |
303 | } |
304 | |
305 | #else |
306 | |
307 | static inline void __bpf_spin_lock(struct bpf_spin_lock *lock) |
308 | { |
309 | atomic_t *l = (void *)lock; |
310 | |
311 | BUILD_BUG_ON(sizeof(*l) != sizeof(*lock)); |
312 | do { |
313 | atomic_cond_read_relaxed(l, !VAL); |
314 | } while (atomic_xchg(l, 1)); |
315 | } |
316 | |
317 | static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) |
318 | { |
319 | atomic_t *l = (void *)lock; |
320 | |
321 | atomic_set_release(l, 0); |
322 | } |
323 | |
324 | #endif |
325 | |
326 | static DEFINE_PER_CPU(unsigned long, irqsave_flags); |
327 | |
328 | static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock) |
329 | { |
330 | unsigned long flags; |
331 | |
332 | local_irq_save(flags); |
333 | __bpf_spin_lock(lock); |
334 | __this_cpu_write(irqsave_flags, flags); |
335 | } |
336 | |
337 | NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock) |
338 | { |
339 | __bpf_spin_lock_irqsave(lock); |
340 | return 0; |
341 | } |
342 | |
343 | const struct bpf_func_proto bpf_spin_lock_proto = { |
344 | .func = bpf_spin_lock, |
345 | .gpl_only = false, |
346 | .ret_type = RET_VOID, |
347 | .arg1_type = ARG_PTR_TO_SPIN_LOCK, |
348 | .arg1_btf_id = BPF_PTR_POISON, |
349 | }; |
350 | |
351 | static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock) |
352 | { |
353 | unsigned long flags; |
354 | |
355 | flags = __this_cpu_read(irqsave_flags); |
356 | __bpf_spin_unlock(lock); |
357 | local_irq_restore(flags); |
358 | } |
359 | |
360 | NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock) |
361 | { |
362 | __bpf_spin_unlock_irqrestore(lock); |
363 | return 0; |
364 | } |
365 | |
366 | const struct bpf_func_proto bpf_spin_unlock_proto = { |
367 | .func = bpf_spin_unlock, |
368 | .gpl_only = false, |
369 | .ret_type = RET_VOID, |
370 | .arg1_type = ARG_PTR_TO_SPIN_LOCK, |
371 | .arg1_btf_id = BPF_PTR_POISON, |
372 | }; |
373 | |
374 | void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, |
375 | bool lock_src) |
376 | { |
377 | struct bpf_spin_lock *lock; |
378 | |
379 | if (lock_src) |
380 | lock = src + map->record->spin_lock_off; |
381 | else |
382 | lock = dst + map->record->spin_lock_off; |
383 | preempt_disable(); |
384 | __bpf_spin_lock_irqsave(lock); |
385 | copy_map_value(map, dst, src); |
386 | __bpf_spin_unlock_irqrestore(lock); |
387 | preempt_enable(); |
388 | } |
389 | |
390 | BPF_CALL_0(bpf_jiffies64) |
391 | { |
392 | return get_jiffies_64(); |
393 | } |
394 | |
395 | const struct bpf_func_proto bpf_jiffies64_proto = { |
396 | .func = bpf_jiffies64, |
397 | .gpl_only = false, |
398 | .ret_type = RET_INTEGER, |
399 | }; |
400 | |
401 | #ifdef CONFIG_CGROUPS |
402 | BPF_CALL_0(bpf_get_current_cgroup_id) |
403 | { |
404 | struct cgroup *cgrp; |
405 | u64 cgrp_id; |
406 | |
407 | rcu_read_lock(); |
408 | cgrp = task_dfl_cgroup(current); |
409 | cgrp_id = cgroup_id(cgrp); |
410 | rcu_read_unlock(); |
411 | |
412 | return cgrp_id; |
413 | } |
414 | |
415 | const struct bpf_func_proto bpf_get_current_cgroup_id_proto = { |
416 | .func = bpf_get_current_cgroup_id, |
417 | .gpl_only = false, |
418 | .ret_type = RET_INTEGER, |
419 | }; |
420 | |
421 | BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level) |
422 | { |
423 | struct cgroup *cgrp; |
424 | struct cgroup *ancestor; |
425 | u64 cgrp_id; |
426 | |
427 | rcu_read_lock(); |
428 | cgrp = task_dfl_cgroup(current); |
429 | ancestor = cgroup_ancestor(cgrp, ancestor_level); |
	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
431 | rcu_read_unlock(); |
432 | |
433 | return cgrp_id; |
434 | } |
435 | |
436 | const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = { |
437 | .func = bpf_get_current_ancestor_cgroup_id, |
438 | .gpl_only = false, |
439 | .ret_type = RET_INTEGER, |
440 | .arg1_type = ARG_ANYTHING, |
441 | }; |
442 | #endif /* CONFIG_CGROUPS */ |
443 | |
444 | #define BPF_STRTOX_BASE_MASK 0x1F |
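
/* A rough BPF-program-side sketch of how the flags argument of bpf_strtol()
 * and bpf_strtoul() is meant to be used: the low 5 bits carry the numeric
 * base, all other bits must be zero (buffers and values are illustrative):
 *
 *	long val;
 *
 *	bpf_strtol("0x1f", sizeof("0x1f"), 0, &val);	base auto-detected
 *	bpf_strtol("1f", sizeof("1f"), 16, &val);	base forced to 16
 */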
445 | |
446 | static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags, |
447 | unsigned long long *res, bool *is_negative) |
448 | { |
449 | unsigned int base = flags & BPF_STRTOX_BASE_MASK; |
450 | const char *cur_buf = buf; |
451 | size_t cur_len = buf_len; |
452 | unsigned int consumed; |
453 | size_t val_len; |
454 | char str[64]; |
455 | |
456 | if (!buf || !buf_len || !res || !is_negative) |
457 | return -EINVAL; |
458 | |
459 | if (base != 0 && base != 8 && base != 10 && base != 16) |
460 | return -EINVAL; |
461 | |
462 | if (flags & ~BPF_STRTOX_BASE_MASK) |
463 | return -EINVAL; |
464 | |
465 | while (cur_buf < buf + buf_len && isspace(*cur_buf)) |
466 | ++cur_buf; |
467 | |
468 | *is_negative = (cur_buf < buf + buf_len && *cur_buf == '-'); |
469 | if (*is_negative) |
470 | ++cur_buf; |
471 | |
472 | consumed = cur_buf - buf; |
473 | cur_len -= consumed; |
474 | if (!cur_len) |
475 | return -EINVAL; |
476 | |
477 | cur_len = min(cur_len, sizeof(str) - 1); |
478 | memcpy(str, cur_buf, cur_len); |
479 | str[cur_len] = '\0'; |
480 | cur_buf = str; |
481 | |
	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);
484 | |
485 | if (val_len & KSTRTOX_OVERFLOW) |
486 | return -ERANGE; |
487 | |
488 | if (val_len == 0) |
489 | return -EINVAL; |
490 | |
491 | cur_buf += val_len; |
492 | consumed += cur_buf - str; |
493 | |
494 | return consumed; |
495 | } |
496 | |
497 | static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags, |
498 | long long *res) |
499 | { |
500 | unsigned long long _res; |
501 | bool is_negative; |
502 | int err; |
503 | |
	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
505 | if (err < 0) |
506 | return err; |
507 | if (is_negative) { |
508 | if ((long long)-_res > 0) |
509 | return -ERANGE; |
510 | *res = -_res; |
511 | } else { |
512 | if ((long long)_res < 0) |
513 | return -ERANGE; |
514 | *res = _res; |
515 | } |
516 | return err; |
517 | } |
518 | |
519 | BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags, |
520 | long *, res) |
521 | { |
522 | long long _res; |
523 | int err; |
524 | |
	err = __bpf_strtoll(buf, buf_len, flags, &_res);
526 | if (err < 0) |
527 | return err; |
528 | if (_res != (long)_res) |
529 | return -ERANGE; |
530 | *res = _res; |
531 | return err; |
532 | } |
533 | |
534 | const struct bpf_func_proto bpf_strtol_proto = { |
535 | .func = bpf_strtol, |
536 | .gpl_only = false, |
537 | .ret_type = RET_INTEGER, |
538 | .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
539 | .arg2_type = ARG_CONST_SIZE, |
540 | .arg3_type = ARG_ANYTHING, |
541 | .arg4_type = ARG_PTR_TO_LONG, |
542 | }; |
543 | |
544 | BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags, |
545 | unsigned long *, res) |
546 | { |
547 | unsigned long long _res; |
548 | bool is_negative; |
549 | int err; |
550 | |
	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
552 | if (err < 0) |
553 | return err; |
554 | if (is_negative) |
555 | return -EINVAL; |
556 | if (_res != (unsigned long)_res) |
557 | return -ERANGE; |
558 | *res = _res; |
559 | return err; |
560 | } |
561 | |
562 | const struct bpf_func_proto bpf_strtoul_proto = { |
563 | .func = bpf_strtoul, |
564 | .gpl_only = false, |
565 | .ret_type = RET_INTEGER, |
566 | .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
567 | .arg2_type = ARG_CONST_SIZE, |
568 | .arg3_type = ARG_ANYTHING, |
569 | .arg4_type = ARG_PTR_TO_LONG, |
570 | }; |
571 | |
572 | BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2) |
573 | { |
574 | return strncmp(s1, s2, s1_sz); |
575 | } |
576 | |
577 | static const struct bpf_func_proto bpf_strncmp_proto = { |
578 | .func = bpf_strncmp, |
579 | .gpl_only = false, |
580 | .ret_type = RET_INTEGER, |
581 | .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
582 | .arg2_type = ARG_CONST_SIZE, |
583 | .arg3_type = ARG_PTR_TO_CONST_STR, |
584 | }; |
585 | |
586 | BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino, |
587 | struct bpf_pidns_info *, nsdata, u32, size) |
588 | { |
589 | struct task_struct *task = current; |
590 | struct pid_namespace *pidns; |
591 | int err = -EINVAL; |
592 | |
593 | if (unlikely(size != sizeof(struct bpf_pidns_info))) |
594 | goto clear; |
595 | |
596 | if (unlikely((u64)(dev_t)dev != dev)) |
597 | goto clear; |
598 | |
599 | if (unlikely(!task)) |
600 | goto clear; |
601 | |
	pidns = task_active_pid_ns(task);
603 | if (unlikely(!pidns)) { |
604 | err = -ENOENT; |
605 | goto clear; |
606 | } |
607 | |
	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
609 | goto clear; |
610 | |
	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
613 | return 0; |
614 | clear: |
615 | memset((void *)nsdata, 0, (size_t) size); |
616 | return err; |
617 | } |
618 | |
619 | const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = { |
620 | .func = bpf_get_ns_current_pid_tgid, |
621 | .gpl_only = false, |
622 | .ret_type = RET_INTEGER, |
623 | .arg1_type = ARG_ANYTHING, |
624 | .arg2_type = ARG_ANYTHING, |
625 | .arg3_type = ARG_PTR_TO_UNINIT_MEM, |
626 | .arg4_type = ARG_CONST_SIZE, |
627 | }; |
628 | |
629 | static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = { |
630 | .func = bpf_get_raw_cpu_id, |
631 | .gpl_only = false, |
632 | .ret_type = RET_INTEGER, |
633 | }; |
634 | |
635 | BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map, |
636 | u64, flags, void *, data, u64, size) |
637 | { |
638 | if (unlikely(flags & ~(BPF_F_INDEX_MASK))) |
639 | return -EINVAL; |
640 | |
	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
642 | } |
643 | |
644 | const struct bpf_func_proto bpf_event_output_data_proto = { |
645 | .func = bpf_event_output_data, |
646 | .gpl_only = true, |
647 | .ret_type = RET_INTEGER, |
648 | .arg1_type = ARG_PTR_TO_CTX, |
649 | .arg2_type = ARG_CONST_MAP_PTR, |
650 | .arg3_type = ARG_ANYTHING, |
651 | .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
652 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
653 | }; |
654 | |
655 | BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size, |
656 | const void __user *, user_ptr) |
657 | { |
	int ret = copy_from_user(dst, user_ptr, size);
659 | |
660 | if (unlikely(ret)) { |
661 | memset(dst, 0, size); |
662 | ret = -EFAULT; |
663 | } |
664 | |
665 | return ret; |
666 | } |
667 | |
668 | const struct bpf_func_proto bpf_copy_from_user_proto = { |
669 | .func = bpf_copy_from_user, |
670 | .gpl_only = false, |
671 | .might_sleep = true, |
672 | .ret_type = RET_INTEGER, |
673 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, |
674 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, |
675 | .arg3_type = ARG_ANYTHING, |
676 | }; |
677 | |
678 | BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size, |
679 | const void __user *, user_ptr, struct task_struct *, tsk, u64, flags) |
680 | { |
681 | int ret; |
682 | |
683 | /* flags is not used yet */ |
684 | if (unlikely(flags)) |
685 | return -EINVAL; |
686 | |
687 | if (unlikely(!size)) |
688 | return 0; |
689 | |
	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
691 | if (ret == size) |
692 | return 0; |
693 | |
694 | memset(dst, 0, size); |
695 | /* Return -EFAULT for partial read */ |
696 | return ret < 0 ? ret : -EFAULT; |
697 | } |
698 | |
699 | const struct bpf_func_proto bpf_copy_from_user_task_proto = { |
700 | .func = bpf_copy_from_user_task, |
701 | .gpl_only = true, |
702 | .might_sleep = true, |
703 | .ret_type = RET_INTEGER, |
704 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, |
705 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, |
706 | .arg3_type = ARG_ANYTHING, |
707 | .arg4_type = ARG_PTR_TO_BTF_ID, |
708 | .arg4_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], |
709 | .arg5_type = ARG_ANYTHING |
710 | }; |
711 | |
712 | BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu) |
713 | { |
714 | if (cpu >= nr_cpu_ids) |
715 | return (unsigned long)NULL; |
716 | |
717 | return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu); |
718 | } |
719 | |
720 | const struct bpf_func_proto bpf_per_cpu_ptr_proto = { |
721 | .func = bpf_per_cpu_ptr, |
722 | .gpl_only = false, |
723 | .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY, |
724 | .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID, |
725 | .arg2_type = ARG_ANYTHING, |
726 | }; |
727 | |
728 | BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr) |
729 | { |
730 | return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr); |
731 | } |
732 | |
733 | const struct bpf_func_proto bpf_this_cpu_ptr_proto = { |
734 | .func = bpf_this_cpu_ptr, |
735 | .gpl_only = false, |
736 | .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY, |
737 | .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID, |
738 | }; |
739 | |
740 | static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype, |
741 | size_t bufsz) |
742 | { |
743 | void __user *user_ptr = (__force void __user *)unsafe_ptr; |
744 | |
745 | buf[0] = 0; |
746 | |
747 | switch (fmt_ptype) { |
748 | case 's': |
749 | #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE |
750 | if ((unsigned long)unsafe_ptr < TASK_SIZE) |
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
752 | fallthrough; |
753 | #endif |
754 | case 'k': |
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
756 | case 'u': |
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
758 | } |
759 | |
760 | return -EINVAL; |
761 | } |
762 | |
763 | /* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary |
764 | * arguments representation. |
765 | */ |
766 | #define MAX_BPRINTF_BIN_ARGS 512 |
767 | |
768 | /* Support executing three nested bprintf helper calls on a given CPU */ |
769 | #define MAX_BPRINTF_NEST_LEVEL 3 |
770 | struct bpf_bprintf_buffers { |
771 | char bin_args[MAX_BPRINTF_BIN_ARGS]; |
772 | char buf[MAX_BPRINTF_BUF]; |
773 | }; |
774 | |
775 | static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs); |
776 | static DEFINE_PER_CPU(int, bpf_bprintf_nest_level); |
777 | |
778 | static int try_get_buffers(struct bpf_bprintf_buffers **bufs) |
779 | { |
780 | int nest_level; |
781 | |
782 | preempt_disable(); |
783 | nest_level = this_cpu_inc_return(bpf_bprintf_nest_level); |
784 | if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) { |
785 | this_cpu_dec(bpf_bprintf_nest_level); |
786 | preempt_enable(); |
787 | return -EBUSY; |
788 | } |
789 | *bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]); |
790 | |
791 | return 0; |
792 | } |
793 | |
794 | void bpf_bprintf_cleanup(struct bpf_bprintf_data *data) |
795 | { |
796 | if (!data->bin_args && !data->buf) |
797 | return; |
798 | if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0)) |
799 | return; |
800 | this_cpu_dec(bpf_bprintf_nest_level); |
801 | preempt_enable(); |
802 | } |
803 | |
804 | /* |
805 | * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers |
806 | * |
807 | * Returns a negative value if fmt is an invalid format string or 0 otherwise. |
808 | * |
809 | * This can be used in two ways: |
810 | * - Format string verification only: when data->get_bin_args is false |
811 | * - Arguments preparation: in addition to the above verification, it writes in |
812 | * data->bin_args a binary representation of arguments usable by bstr_printf |
813 | * where pointers from BPF have been sanitized. |
814 | * |
815 | * In argument preparation mode, if 0 is returned, safe temporary buffers are |
816 | * allocated and bpf_bprintf_cleanup should be called to free them after use. |
817 | */ |
818 | int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, |
819 | u32 num_args, struct bpf_bprintf_data *data) |
820 | { |
821 | bool get_buffers = (data->get_bin_args && num_args) || data->get_buf; |
822 | char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end; |
823 | struct bpf_bprintf_buffers *buffers = NULL; |
824 | size_t sizeof_cur_arg, sizeof_cur_ip; |
825 | int err, i, num_spec = 0; |
826 | u64 cur_arg; |
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";
828 | |
829 | fmt_end = strnchr(fmt, fmt_size, 0); |
830 | if (!fmt_end) |
831 | return -EINVAL; |
832 | fmt_size = fmt_end - fmt; |
833 | |
	if (get_buffers && try_get_buffers(&buffers))
835 | return -EBUSY; |
836 | |
837 | if (data->get_bin_args) { |
838 | if (num_args) |
839 | tmp_buf = buffers->bin_args; |
840 | tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS; |
841 | data->bin_args = (u32 *)tmp_buf; |
842 | } |
843 | |
844 | if (data->get_buf) |
845 | data->buf = buffers->buf; |
846 | |
847 | for (i = 0; i < fmt_size; i++) { |
848 | if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) { |
849 | err = -EINVAL; |
850 | goto out; |
851 | } |
852 | |
853 | if (fmt[i] != '%') |
854 | continue; |
855 | |
856 | if (fmt[i + 1] == '%') { |
857 | i++; |
858 | continue; |
859 | } |
860 | |
861 | if (num_spec >= num_args) { |
862 | err = -EINVAL; |
863 | goto out; |
864 | } |
865 | |
866 | /* The string is zero-terminated so if fmt[i] != 0, we can |
867 | * always access fmt[i + 1], in the worst case it will be a 0 |
868 | */ |
869 | i++; |
870 | |
871 | /* skip optional "[0 +-][num]" width formatting field */ |
872 | while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' || |
873 | fmt[i] == ' ') |
874 | i++; |
875 | if (fmt[i] >= '1' && fmt[i] <= '9') { |
876 | i++; |
877 | while (fmt[i] >= '0' && fmt[i] <= '9') |
878 | i++; |
879 | } |
880 | |
881 | if (fmt[i] == 'p') { |
882 | sizeof_cur_arg = sizeof(long); |
883 | |
884 | if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') && |
885 | fmt[i + 2] == 's') { |
886 | fmt_ptype = fmt[i + 1]; |
887 | i += 2; |
888 | goto fmt_str; |
889 | } |
890 | |
891 | if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) || |
892 | ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' || |
893 | fmt[i + 1] == 'x' || fmt[i + 1] == 's' || |
894 | fmt[i + 1] == 'S') { |
895 | /* just kernel pointers */ |
896 | if (tmp_buf) |
897 | cur_arg = raw_args[num_spec]; |
898 | i++; |
899 | goto nocopy_fmt; |
900 | } |
901 | |
902 | if (fmt[i + 1] == 'B') { |
903 | if (tmp_buf) { |
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
908 | tmp_buf += (err + 1); |
909 | } |
910 | |
911 | i++; |
912 | num_spec++; |
913 | continue; |
914 | } |
915 | |
916 | /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */ |
917 | if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') || |
918 | (fmt[i + 2] != '4' && fmt[i + 2] != '6')) { |
919 | err = -EINVAL; |
920 | goto out; |
921 | } |
922 | |
923 | i += 2; |
924 | if (!tmp_buf) |
925 | goto nocopy_fmt; |
926 | |
927 | sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16; |
928 | if (tmp_buf_end - tmp_buf < sizeof_cur_ip) { |
929 | err = -ENOSPC; |
930 | goto out; |
931 | } |
932 | |
933 | unsafe_ptr = (char *)(long)raw_args[num_spec]; |
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
936 | if (err < 0) |
937 | memset(cur_ip, 0, sizeof_cur_ip); |
938 | |
939 | /* hack: bstr_printf expects IP addresses to be |
940 | * pre-formatted as strings, ironically, the easiest way |
941 | * to do that is to call snprintf. |
942 | */ |
943 | ip_spec[2] = fmt[i - 1]; |
944 | ip_spec[3] = fmt[i]; |
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);
947 | |
948 | tmp_buf += err + 1; |
949 | num_spec++; |
950 | |
951 | continue; |
952 | } else if (fmt[i] == 's') { |
953 | fmt_ptype = fmt[i]; |
954 | fmt_str: |
955 | if (fmt[i + 1] != 0 && |
956 | !isspace(fmt[i + 1]) && |
957 | !ispunct(fmt[i + 1])) { |
958 | err = -EINVAL; |
959 | goto out; |
960 | } |
961 | |
962 | if (!tmp_buf) |
963 | goto nocopy_fmt; |
964 | |
965 | if (tmp_buf_end == tmp_buf) { |
966 | err = -ENOSPC; |
967 | goto out; |
968 | } |
969 | |
970 | unsafe_ptr = (char *)(long)raw_args[num_spec]; |
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
974 | if (err < 0) { |
975 | tmp_buf[0] = '\0'; |
976 | err = 1; |
977 | } |
978 | |
979 | tmp_buf += err; |
980 | num_spec++; |
981 | |
982 | continue; |
983 | } else if (fmt[i] == 'c') { |
984 | if (!tmp_buf) |
985 | goto nocopy_fmt; |
986 | |
987 | if (tmp_buf_end == tmp_buf) { |
988 | err = -ENOSPC; |
989 | goto out; |
990 | } |
991 | |
992 | *tmp_buf = raw_args[num_spec]; |
993 | tmp_buf++; |
994 | num_spec++; |
995 | |
996 | continue; |
997 | } |
998 | |
999 | sizeof_cur_arg = sizeof(int); |
1000 | |
1001 | if (fmt[i] == 'l') { |
1002 | sizeof_cur_arg = sizeof(long); |
1003 | i++; |
1004 | } |
1005 | if (fmt[i] == 'l') { |
1006 | sizeof_cur_arg = sizeof(long long); |
1007 | i++; |
1008 | } |
1009 | |
1010 | if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' && |
1011 | fmt[i] != 'x' && fmt[i] != 'X') { |
1012 | err = -EINVAL; |
1013 | goto out; |
1014 | } |
1015 | |
1016 | if (tmp_buf) |
1017 | cur_arg = raw_args[num_spec]; |
1018 | nocopy_fmt: |
1019 | if (tmp_buf) { |
1020 | tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32)); |
1021 | if (tmp_buf_end - tmp_buf < sizeof_cur_arg) { |
1022 | err = -ENOSPC; |
1023 | goto out; |
1024 | } |
1025 | |
1026 | if (sizeof_cur_arg == 8) { |
1027 | *(u32 *)tmp_buf = *(u32 *)&cur_arg; |
1028 | *(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1); |
1029 | } else { |
1030 | *(u32 *)tmp_buf = (u32)(long)cur_arg; |
1031 | } |
1032 | tmp_buf += sizeof_cur_arg; |
1033 | } |
1034 | num_spec++; |
1035 | } |
1036 | |
1037 | err = 0; |
1038 | out: |
1039 | if (err) |
1040 | bpf_bprintf_cleanup(data); |
1041 | return err; |
1042 | } |
1043 | |
1044 | BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt, |
1045 | const void *, args, u32, data_len) |
1046 | { |
1047 | struct bpf_bprintf_data data = { |
1048 | .get_bin_args = true, |
1049 | }; |
1050 | int err, num_args; |
1051 | |
1052 | if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 || |
1053 | (data_len && !args)) |
1054 | return -EINVAL; |
1055 | num_args = data_len / 8; |
1056 | |
1057 | /* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we |
1058 | * can safely give an unbounded size. |
1059 | */ |
	err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
1061 | if (err < 0) |
1062 | return err; |
1063 | |
	err = bstr_printf(str, str_size, fmt, data.bin_args);
1065 | |
	bpf_bprintf_cleanup(&data);
1067 | |
1068 | return err + 1; |
1069 | } |
1070 | |
1071 | const struct bpf_func_proto bpf_snprintf_proto = { |
1072 | .func = bpf_snprintf, |
1073 | .gpl_only = true, |
1074 | .ret_type = RET_INTEGER, |
1075 | .arg1_type = ARG_PTR_TO_MEM_OR_NULL, |
1076 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, |
1077 | .arg3_type = ARG_PTR_TO_CONST_STR, |
1078 | .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, |
1079 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
1080 | }; |
1081 | |
1082 | /* BPF map elements can contain 'struct bpf_timer'. |
1083 | * Such map owns all of its BPF timers. |
1084 | * 'struct bpf_timer' is allocated as part of map element allocation |
1085 | * and it's zero initialized. |
1086 | * That space is used to keep 'struct bpf_timer_kern'. |
1087 | * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and |
1088 | * remembers 'struct bpf_map *' pointer it's part of. |
 * bpf_timer_set_callback() increments prog refcnt and assigns bpf callback_fn.
1090 | * bpf_timer_start() arms the timer. |
1091 | * If user space reference to a map goes to zero at this point |
1092 | * ops->map_release_uref callback is responsible for cancelling the timers, |
1093 | * freeing their memory, and decrementing prog's refcnts. |
1094 | * bpf_timer_cancel() cancels the timer and decrements prog's refcnt. |
 * Inner maps can contain bpf timers as well. ops->map_release_uref frees
 * the timers when an inner map is replaced or deleted by user space.
1097 | */ |
1098 | struct bpf_hrtimer { |
1099 | struct hrtimer timer; |
1100 | struct bpf_map *map; |
1101 | struct bpf_prog *prog; |
1102 | void __rcu *callback_fn; |
1103 | void *value; |
1104 | struct rcu_head rcu; |
1105 | }; |
1106 | |
1107 | /* the actual struct hidden inside uapi struct bpf_timer */ |
1108 | struct bpf_timer_kern { |
1109 | struct bpf_hrtimer *timer; |
1110 | /* bpf_spin_lock is used here instead of spinlock_t to make |
1111 | * sure that it always fits into space reserved by struct bpf_timer |
1112 | * regardless of LOCKDEP and spinlock debug flags. |
1113 | */ |
1114 | struct bpf_spin_lock lock; |
1115 | } __attribute__((aligned(8))); |
1116 | |
1117 | static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running); |
1118 | |
1119 | static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer) |
1120 | { |
1121 | struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer); |
1122 | struct bpf_map *map = t->map; |
1123 | void *value = t->value; |
1124 | bpf_callback_t callback_fn; |
1125 | void *key; |
1126 | u32 idx; |
1127 | |
1128 | BTF_TYPE_EMIT(struct bpf_timer); |
1129 | callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held()); |
1130 | if (!callback_fn) |
1131 | goto out; |
1132 | |
1133 | /* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and |
1134 | * cannot be preempted by another bpf_timer_cb() on the same cpu. |
1135 | * Remember the timer this callback is servicing to prevent |
1136 | * deadlock if callback_fn() calls bpf_timer_cancel() or |
1137 | * bpf_map_delete_elem() on the same timer. |
1138 | */ |
1139 | this_cpu_write(hrtimer_running, t); |
1140 | if (map->map_type == BPF_MAP_TYPE_ARRAY) { |
1141 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
1142 | |
1143 | /* compute the key */ |
1144 | idx = ((char *)value - array->value) / array->elem_size; |
1145 | key = &idx; |
1146 | } else { /* hash or lru */ |
1147 | key = value - round_up(map->key_size, 8); |
1148 | } |
1149 | |
1150 | callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0); |
1151 | /* The verifier checked that return value is zero. */ |
1152 | |
1153 | this_cpu_write(hrtimer_running, NULL); |
1154 | out: |
1155 | return HRTIMER_NORESTART; |
1156 | } |
1157 | |
1158 | BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map, |
1159 | u64, flags) |
1160 | { |
1161 | clockid_t clockid = flags & (MAX_CLOCKS - 1); |
1162 | struct bpf_hrtimer *t; |
1163 | int ret = 0; |
1164 | |
1165 | BUILD_BUG_ON(MAX_CLOCKS != 16); |
1166 | BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer)); |
1167 | BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer)); |
1168 | |
1169 | if (in_nmi()) |
1170 | return -EOPNOTSUPP; |
1171 | |
1172 | if (flags >= MAX_CLOCKS || |
1173 | /* similar to timerfd except _ALARM variants are not supported */ |
1174 | (clockid != CLOCK_MONOTONIC && |
1175 | clockid != CLOCK_REALTIME && |
1176 | clockid != CLOCK_BOOTTIME)) |
1177 | return -EINVAL; |
	__bpf_spin_lock_irqsave(&timer->lock);
1179 | t = timer->timer; |
1180 | if (t) { |
1181 | ret = -EBUSY; |
1182 | goto out; |
1183 | } |
1184 | /* allocate hrtimer via map_kmalloc to use memcg accounting */ |
	t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
1186 | if (!t) { |
1187 | ret = -ENOMEM; |
1188 | goto out; |
1189 | } |
1190 | t->value = (void *)timer - map->record->timer_off; |
1191 | t->map = map; |
1192 | t->prog = NULL; |
1193 | rcu_assign_pointer(t->callback_fn, NULL); |
	hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
1195 | t->timer.function = bpf_timer_cb; |
1196 | WRITE_ONCE(timer->timer, t); |
1197 | /* Guarantee the order between timer->timer and map->usercnt. So |
1198 | * when there are concurrent uref release and bpf timer init, either |
	 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
1200 | * timer or atomic64_read() below returns a zero usercnt. |
1201 | */ |
1202 | smp_mb(); |
	if (!atomic64_read(&map->usercnt)) {
1204 | /* maps with timers must be either held by user space |
1205 | * or pinned in bpffs. |
1206 | */ |
1207 | WRITE_ONCE(timer->timer, NULL); |
		kfree(t);
1209 | ret = -EPERM; |
1210 | } |
1211 | out: |
	__bpf_spin_unlock_irqrestore(&timer->lock);
1213 | return ret; |
1214 | } |
1215 | |
1216 | static const struct bpf_func_proto bpf_timer_init_proto = { |
1217 | .func = bpf_timer_init, |
1218 | .gpl_only = true, |
1219 | .ret_type = RET_INTEGER, |
1220 | .arg1_type = ARG_PTR_TO_TIMER, |
1221 | .arg2_type = ARG_CONST_MAP_PTR, |
1222 | .arg3_type = ARG_ANYTHING, |
1223 | }; |
1224 | |
1225 | BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn, |
1226 | struct bpf_prog_aux *, aux) |
1227 | { |
1228 | struct bpf_prog *prev, *prog = aux->prog; |
1229 | struct bpf_hrtimer *t; |
1230 | int ret = 0; |
1231 | |
1232 | if (in_nmi()) |
1233 | return -EOPNOTSUPP; |
	__bpf_spin_lock_irqsave(&timer->lock);
1235 | t = timer->timer; |
1236 | if (!t) { |
1237 | ret = -EINVAL; |
1238 | goto out; |
1239 | } |
	if (!atomic64_read(&t->map->usercnt)) {
1241 | /* maps with timers must be either held by user space |
1242 | * or pinned in bpffs. Otherwise timer might still be |
1243 | * running even when bpf prog is detached and user space |
1244 | * is gone, since map_release_uref won't ever be called. |
1245 | */ |
1246 | ret = -EPERM; |
1247 | goto out; |
1248 | } |
1249 | prev = t->prog; |
1250 | if (prev != prog) { |
1251 | /* Bump prog refcnt once. Every bpf_timer_set_callback() |
1252 | * can pick different callback_fn-s within the same prog. |
1253 | */ |
1254 | prog = bpf_prog_inc_not_zero(prog); |
		if (IS_ERR(prog)) {
			ret = PTR_ERR(prog);
1257 | goto out; |
1258 | } |
1259 | if (prev) |
1260 | /* Drop prev prog refcnt when swapping with new prog */ |
			bpf_prog_put(prev);
1262 | t->prog = prog; |
1263 | } |
1264 | rcu_assign_pointer(t->callback_fn, callback_fn); |
1265 | out: |
	__bpf_spin_unlock_irqrestore(&timer->lock);
1267 | return ret; |
1268 | } |
1269 | |
1270 | static const struct bpf_func_proto bpf_timer_set_callback_proto = { |
1271 | .func = bpf_timer_set_callback, |
1272 | .gpl_only = true, |
1273 | .ret_type = RET_INTEGER, |
1274 | .arg1_type = ARG_PTR_TO_TIMER, |
1275 | .arg2_type = ARG_PTR_TO_FUNC, |
1276 | }; |
1277 | |
1278 | BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags) |
1279 | { |
1280 | struct bpf_hrtimer *t; |
1281 | int ret = 0; |
1282 | enum hrtimer_mode mode; |
1283 | |
1284 | if (in_nmi()) |
1285 | return -EOPNOTSUPP; |
1286 | if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN)) |
1287 | return -EINVAL; |
	__bpf_spin_lock_irqsave(&timer->lock);
1289 | t = timer->timer; |
1290 | if (!t || !t->prog) { |
1291 | ret = -EINVAL; |
1292 | goto out; |
1293 | } |
1294 | |
1295 | if (flags & BPF_F_TIMER_ABS) |
1296 | mode = HRTIMER_MODE_ABS_SOFT; |
1297 | else |
1298 | mode = HRTIMER_MODE_REL_SOFT; |
1299 | |
1300 | if (flags & BPF_F_TIMER_CPU_PIN) |
1301 | mode |= HRTIMER_MODE_PINNED; |
1302 | |
	hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
1304 | out: |
	__bpf_spin_unlock_irqrestore(&timer->lock);
1306 | return ret; |
1307 | } |
1308 | |
1309 | static const struct bpf_func_proto bpf_timer_start_proto = { |
1310 | .func = bpf_timer_start, |
1311 | .gpl_only = true, |
1312 | .ret_type = RET_INTEGER, |
1313 | .arg1_type = ARG_PTR_TO_TIMER, |
1314 | .arg2_type = ARG_ANYTHING, |
1315 | .arg3_type = ARG_ANYTHING, |
1316 | }; |
1317 | |
1318 | static void drop_prog_refcnt(struct bpf_hrtimer *t) |
1319 | { |
1320 | struct bpf_prog *prog = t->prog; |
1321 | |
1322 | if (prog) { |
1323 | bpf_prog_put(prog); |
1324 | t->prog = NULL; |
1325 | rcu_assign_pointer(t->callback_fn, NULL); |
1326 | } |
1327 | } |
1328 | |
1329 | BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer) |
1330 | { |
1331 | struct bpf_hrtimer *t; |
1332 | int ret = 0; |
1333 | |
1334 | if (in_nmi()) |
1335 | return -EOPNOTSUPP; |
1336 | rcu_read_lock(); |
	__bpf_spin_lock_irqsave(&timer->lock);
1338 | t = timer->timer; |
1339 | if (!t) { |
1340 | ret = -EINVAL; |
1341 | goto out; |
1342 | } |
1343 | if (this_cpu_read(hrtimer_running) == t) { |
1344 | /* If bpf callback_fn is trying to bpf_timer_cancel() |
1345 | * its own timer the hrtimer_cancel() will deadlock |
1346 | * since it waits for callback_fn to finish |
1347 | */ |
1348 | ret = -EDEADLK; |
1349 | goto out; |
1350 | } |
1351 | drop_prog_refcnt(t); |
1352 | out: |
	__bpf_spin_unlock_irqrestore(&timer->lock);
1354 | /* Cancel the timer and wait for associated callback to finish |
1355 | * if it was running. |
1356 | */ |
	ret = ret ?: hrtimer_cancel(&t->timer);
1358 | rcu_read_unlock(); |
1359 | return ret; |
1360 | } |
1361 | |
1362 | static const struct bpf_func_proto bpf_timer_cancel_proto = { |
1363 | .func = bpf_timer_cancel, |
1364 | .gpl_only = true, |
1365 | .ret_type = RET_INTEGER, |
1366 | .arg1_type = ARG_PTR_TO_TIMER, |
1367 | }; |
1368 | |
1369 | /* This function is called by map_delete/update_elem for individual element and |
1370 | * by ops->map_release_uref when the user space reference to a map reaches zero. |
1371 | */ |
1372 | void bpf_timer_cancel_and_free(void *val) |
1373 | { |
1374 | struct bpf_timer_kern *timer = val; |
1375 | struct bpf_hrtimer *t; |
1376 | |
1377 | /* Performance optimization: read timer->timer without lock first. */ |
1378 | if (!READ_ONCE(timer->timer)) |
1379 | return; |
1380 | |
	__bpf_spin_lock_irqsave(&timer->lock);
1382 | /* re-read it under lock */ |
1383 | t = timer->timer; |
1384 | if (!t) |
1385 | goto out; |
1386 | drop_prog_refcnt(t); |
1387 | /* The subsequent bpf_timer_start/cancel() helpers won't be able to use |
1388 | * this timer, since it won't be initialized. |
1389 | */ |
1390 | WRITE_ONCE(timer->timer, NULL); |
1391 | out: |
	__bpf_spin_unlock_irqrestore(&timer->lock);
1393 | if (!t) |
1394 | return; |
1395 | /* Cancel the timer and wait for callback to complete if it was running. |
1396 | * If hrtimer_cancel() can be safely called it's safe to call kfree(t) |
1397 | * right after for both preallocated and non-preallocated maps. |
1398 | * The timer->timer = NULL was already done and no code path can |
1399 | * see address 't' anymore. |
1400 | * |
1401 | * Check that bpf_map_delete/update_elem() wasn't called from timer |
1402 | * callback_fn. In such case don't call hrtimer_cancel() (since it will |
1403 | * deadlock) and don't call hrtimer_try_to_cancel() (since it will just |
1404 | * return -1). Though callback_fn is still running on this cpu it's |
1405 | * safe to do kfree(t) because bpf_timer_cb() read everything it needed |
1406 | * from 't'. The bpf subprog callback_fn won't be able to access 't', |
1407 | * since timer->timer = NULL was already done. The timer will be |
1408 | * effectively cancelled because bpf_timer_cb() will return |
1409 | * HRTIMER_NORESTART. |
1410 | */ |
1411 | if (this_cpu_read(hrtimer_running) != t) |
		hrtimer_cancel(&t->timer);
1413 | kfree_rcu(t, rcu); |
1414 | } |
1415 | |
1416 | BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr) |
1417 | { |
1418 | unsigned long *kptr = map_value; |
1419 | |
1420 | /* This helper may be inlined by verifier. */ |
1421 | return xchg(kptr, (unsigned long)ptr); |
1422 | } |
1423 | |
1424 | /* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg() |
1425 | * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to |
1426 | * denote type that verifier will determine. |
1427 | */ |
1428 | static const struct bpf_func_proto bpf_kptr_xchg_proto = { |
1429 | .func = bpf_kptr_xchg, |
1430 | .gpl_only = false, |
1431 | .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, |
1432 | .ret_btf_id = BPF_PTR_POISON, |
1433 | .arg1_type = ARG_PTR_TO_KPTR, |
1434 | .arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE, |
1435 | .arg2_btf_id = BPF_PTR_POISON, |
1436 | }; |
1437 | |
/* Since the upper 8 bits of dynptr->size are reserved, the
1439 | * maximum supported size is 2^24 - 1. |
1440 | */ |
1441 | #define DYNPTR_MAX_SIZE ((1UL << 24) - 1) |
1442 | #define DYNPTR_TYPE_SHIFT 28 |
1443 | #define DYNPTR_SIZE_MASK 0xFFFFFF |
1444 | #define DYNPTR_RDONLY_BIT BIT(31) |
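
/* An illustrative breakdown of how the masks above pack metadata into
 * dynptr->size:
 *
 *	bit  31    : DYNPTR_RDONLY_BIT
 *	bits 30-28 : dynptr type (shifted by DYNPTR_TYPE_SHIFT)
 *	bits 27-24 : currently unused
 *	bits 23-0  : size (DYNPTR_SIZE_MASK)
 */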
1445 | |
1446 | static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr) |
1447 | { |
1448 | return ptr->size & DYNPTR_RDONLY_BIT; |
1449 | } |
1450 | |
1451 | void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr) |
1452 | { |
1453 | ptr->size |= DYNPTR_RDONLY_BIT; |
1454 | } |
1455 | |
1456 | static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type) |
1457 | { |
1458 | ptr->size |= type << DYNPTR_TYPE_SHIFT; |
1459 | } |
1460 | |
1461 | static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr) |
1462 | { |
1463 | return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT; |
1464 | } |
1465 | |
1466 | u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr) |
1467 | { |
1468 | return ptr->size & DYNPTR_SIZE_MASK; |
1469 | } |
1470 | |
1471 | static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size) |
1472 | { |
1473 | u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK; |
1474 | |
1475 | ptr->size = new_size | metadata; |
1476 | } |
1477 | |
1478 | int bpf_dynptr_check_size(u32 size) |
1479 | { |
1480 | return size > DYNPTR_MAX_SIZE ? -E2BIG : 0; |
1481 | } |
1482 | |
1483 | void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, |
1484 | enum bpf_dynptr_type type, u32 offset, u32 size) |
1485 | { |
1486 | ptr->data = data; |
1487 | ptr->offset = offset; |
1488 | ptr->size = size; |
1489 | bpf_dynptr_set_type(ptr, type); |
1490 | } |
1491 | |
1492 | void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr) |
1493 | { |
1494 | memset(ptr, 0, sizeof(*ptr)); |
1495 | } |
1496 | |
1497 | static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len) |
1498 | { |
1499 | u32 size = __bpf_dynptr_size(ptr); |
1500 | |
1501 | if (len > size || offset > size - len) |
1502 | return -E2BIG; |
1503 | |
1504 | return 0; |
1505 | } |
1506 | |
1507 | BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr) |
1508 | { |
1509 | int err; |
1510 | |
1511 | BTF_TYPE_EMIT(struct bpf_dynptr); |
1512 | |
1513 | err = bpf_dynptr_check_size(size); |
1514 | if (err) |
1515 | goto error; |
1516 | |
1517 | /* flags is currently unsupported */ |
1518 | if (flags) { |
1519 | err = -EINVAL; |
1520 | goto error; |
1521 | } |
1522 | |
	bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);
1524 | |
1525 | return 0; |
1526 | |
1527 | error: |
1528 | bpf_dynptr_set_null(ptr); |
1529 | return err; |
1530 | } |
1531 | |
1532 | static const struct bpf_func_proto bpf_dynptr_from_mem_proto = { |
1533 | .func = bpf_dynptr_from_mem, |
1534 | .gpl_only = false, |
1535 | .ret_type = RET_INTEGER, |
1536 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, |
1537 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, |
1538 | .arg3_type = ARG_ANYTHING, |
1539 | .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT, |
1540 | }; |
1541 | |
1542 | BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src, |
1543 | u32, offset, u64, flags) |
1544 | { |
1545 | enum bpf_dynptr_type type; |
1546 | int err; |
1547 | |
1548 | if (!src->data || flags) |
1549 | return -EINVAL; |
1550 | |
	err = bpf_dynptr_check_off_len(src, offset, len);
1552 | if (err) |
1553 | return err; |
1554 | |
	type = bpf_dynptr_get_type(src);
1556 | |
1557 | switch (type) { |
1558 | case BPF_DYNPTR_TYPE_LOCAL: |
1559 | case BPF_DYNPTR_TYPE_RINGBUF: |
1560 | /* Source and destination may possibly overlap, hence use memmove to |
		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptrs
1562 | * pointing to overlapping PTR_TO_MAP_VALUE regions. |
1563 | */ |
1564 | memmove(dst, src->data + src->offset + offset, len); |
1565 | return 0; |
1566 | case BPF_DYNPTR_TYPE_SKB: |
		return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len);
1568 | case BPF_DYNPTR_TYPE_XDP: |
		return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len);
1570 | default: |
1571 | WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n" , type); |
1572 | return -EFAULT; |
1573 | } |
1574 | } |
1575 | |
1576 | static const struct bpf_func_proto bpf_dynptr_read_proto = { |
1577 | .func = bpf_dynptr_read, |
1578 | .gpl_only = false, |
1579 | .ret_type = RET_INTEGER, |
1580 | .arg1_type = ARG_PTR_TO_UNINIT_MEM, |
1581 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, |
1582 | .arg3_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, |
1583 | .arg4_type = ARG_ANYTHING, |
1584 | .arg5_type = ARG_ANYTHING, |
1585 | }; |
1586 | |
1587 | BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src, |
1588 | u32, len, u64, flags) |
1589 | { |
1590 | enum bpf_dynptr_type type; |
1591 | int err; |
1592 | |
	if (!dst->data || __bpf_dynptr_is_rdonly(dst))
1594 | return -EINVAL; |
1595 | |
	err = bpf_dynptr_check_off_len(dst, offset, len);
1597 | if (err) |
1598 | return err; |
1599 | |
	type = bpf_dynptr_get_type(dst);
1601 | |
1602 | switch (type) { |
1603 | case BPF_DYNPTR_TYPE_LOCAL: |
1604 | case BPF_DYNPTR_TYPE_RINGBUF: |
1605 | if (flags) |
1606 | return -EINVAL; |
1607 | /* Source and destination may possibly overlap, hence use memmove to |
		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptrs
1609 | * pointing to overlapping PTR_TO_MAP_VALUE regions. |
1610 | */ |
1611 | memmove(dst->data + dst->offset + offset, src, len); |
1612 | return 0; |
1613 | case BPF_DYNPTR_TYPE_SKB: |
		return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len,
					     flags);
1616 | case BPF_DYNPTR_TYPE_XDP: |
1617 | if (flags) |
1618 | return -EINVAL; |
		return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len);
1620 | default: |
1621 | WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n" , type); |
1622 | return -EFAULT; |
1623 | } |
1624 | } |
1625 | |
1626 | static const struct bpf_func_proto bpf_dynptr_write_proto = { |
1627 | .func = bpf_dynptr_write, |
1628 | .gpl_only = false, |
1629 | .ret_type = RET_INTEGER, |
1630 | .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, |
1631 | .arg2_type = ARG_ANYTHING, |
1632 | .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
1633 | .arg4_type = ARG_CONST_SIZE_OR_ZERO, |
1634 | .arg5_type = ARG_ANYTHING, |
1635 | }; |
1636 | |
1637 | BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len) |
1638 | { |
1639 | enum bpf_dynptr_type type; |
1640 | int err; |
1641 | |
1642 | if (!ptr->data) |
1643 | return 0; |
1644 | |
1645 | err = bpf_dynptr_check_off_len(ptr, offset, len); |
1646 | if (err) |
1647 | return 0; |
1648 | |
1649 | if (__bpf_dynptr_is_rdonly(ptr)) |
1650 | return 0; |
1651 | |
1652 | type = bpf_dynptr_get_type(ptr); |
1653 | |
1654 | switch (type) { |
1655 | case BPF_DYNPTR_TYPE_LOCAL: |
1656 | case BPF_DYNPTR_TYPE_RINGBUF: |
1657 | return (unsigned long)(ptr->data + ptr->offset + offset); |
1658 | case BPF_DYNPTR_TYPE_SKB: |
1659 | case BPF_DYNPTR_TYPE_XDP: |
1660 | /* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */ |
1661 | return 0; |
1662 | default: |
1663 | WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n" , type); |
1664 | return 0; |
1665 | } |
1666 | } |
1667 | |
1668 | static const struct bpf_func_proto bpf_dynptr_data_proto = { |
1669 | .func = bpf_dynptr_data, |
1670 | .gpl_only = false, |
1671 | .ret_type = RET_PTR_TO_DYNPTR_MEM_OR_NULL, |
1672 | .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, |
1673 | .arg2_type = ARG_ANYTHING, |
1674 | .arg3_type = ARG_CONST_ALLOC_SIZE_OR_ZERO, |
1675 | }; |
1676 | |
1677 | const struct bpf_func_proto bpf_get_current_task_proto __weak; |
1678 | const struct bpf_func_proto bpf_get_current_task_btf_proto __weak; |
1679 | const struct bpf_func_proto bpf_probe_read_user_proto __weak; |
1680 | const struct bpf_func_proto bpf_probe_read_user_str_proto __weak; |
1681 | const struct bpf_func_proto bpf_probe_read_kernel_proto __weak; |
1682 | const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak; |
1683 | const struct bpf_func_proto bpf_task_pt_regs_proto __weak; |
1684 | |
1685 | const struct bpf_func_proto * |
1686 | bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
1687 | { |
1688 | switch (func_id) { |
1689 | case BPF_FUNC_map_lookup_elem: |
1690 | return &bpf_map_lookup_elem_proto; |
1691 | case BPF_FUNC_map_update_elem: |
1692 | return &bpf_map_update_elem_proto; |
1693 | case BPF_FUNC_map_delete_elem: |
1694 | return &bpf_map_delete_elem_proto; |
1695 | case BPF_FUNC_map_push_elem: |
1696 | return &bpf_map_push_elem_proto; |
1697 | case BPF_FUNC_map_pop_elem: |
1698 | return &bpf_map_pop_elem_proto; |
1699 | case BPF_FUNC_map_peek_elem: |
1700 | return &bpf_map_peek_elem_proto; |
1701 | case BPF_FUNC_map_lookup_percpu_elem: |
1702 | return &bpf_map_lookup_percpu_elem_proto; |
1703 | case BPF_FUNC_get_prandom_u32: |
1704 | return &bpf_get_prandom_u32_proto; |
1705 | case BPF_FUNC_get_smp_processor_id: |
1706 | return &bpf_get_raw_smp_processor_id_proto; |
1707 | case BPF_FUNC_get_numa_node_id: |
1708 | return &bpf_get_numa_node_id_proto; |
1709 | case BPF_FUNC_tail_call: |
1710 | return &bpf_tail_call_proto; |
1711 | case BPF_FUNC_ktime_get_ns: |
1712 | return &bpf_ktime_get_ns_proto; |
1713 | case BPF_FUNC_ktime_get_boot_ns: |
1714 | return &bpf_ktime_get_boot_ns_proto; |
1715 | case BPF_FUNC_ktime_get_tai_ns: |
1716 | return &bpf_ktime_get_tai_ns_proto; |
1717 | case BPF_FUNC_ringbuf_output: |
1718 | return &bpf_ringbuf_output_proto; |
1719 | case BPF_FUNC_ringbuf_reserve: |
1720 | return &bpf_ringbuf_reserve_proto; |
1721 | case BPF_FUNC_ringbuf_submit: |
1722 | return &bpf_ringbuf_submit_proto; |
1723 | case BPF_FUNC_ringbuf_discard: |
1724 | return &bpf_ringbuf_discard_proto; |
1725 | case BPF_FUNC_ringbuf_query: |
1726 | return &bpf_ringbuf_query_proto; |
1727 | case BPF_FUNC_strncmp: |
1728 | return &bpf_strncmp_proto; |
1729 | case BPF_FUNC_strtol: |
1730 | return &bpf_strtol_proto; |
1731 | case BPF_FUNC_strtoul: |
1732 | return &bpf_strtoul_proto; |
1733 | default: |
1734 | break; |
1735 | } |
1736 | |
	if (!bpf_token_capable(prog->aux->token, CAP_BPF))
1738 | return NULL; |
1739 | |
1740 | switch (func_id) { |
1741 | case BPF_FUNC_spin_lock: |
1742 | return &bpf_spin_lock_proto; |
1743 | case BPF_FUNC_spin_unlock: |
1744 | return &bpf_spin_unlock_proto; |
1745 | case BPF_FUNC_jiffies64: |
1746 | return &bpf_jiffies64_proto; |
1747 | case BPF_FUNC_per_cpu_ptr: |
1748 | return &bpf_per_cpu_ptr_proto; |
1749 | case BPF_FUNC_this_cpu_ptr: |
1750 | return &bpf_this_cpu_ptr_proto; |
1751 | case BPF_FUNC_timer_init: |
1752 | return &bpf_timer_init_proto; |
1753 | case BPF_FUNC_timer_set_callback: |
1754 | return &bpf_timer_set_callback_proto; |
1755 | case BPF_FUNC_timer_start: |
1756 | return &bpf_timer_start_proto; |
1757 | case BPF_FUNC_timer_cancel: |
1758 | return &bpf_timer_cancel_proto; |
1759 | case BPF_FUNC_kptr_xchg: |
1760 | return &bpf_kptr_xchg_proto; |
1761 | case BPF_FUNC_for_each_map_elem: |
1762 | return &bpf_for_each_map_elem_proto; |
1763 | case BPF_FUNC_loop: |
1764 | return &bpf_loop_proto; |
1765 | case BPF_FUNC_user_ringbuf_drain: |
1766 | return &bpf_user_ringbuf_drain_proto; |
1767 | case BPF_FUNC_ringbuf_reserve_dynptr: |
1768 | return &bpf_ringbuf_reserve_dynptr_proto; |
1769 | case BPF_FUNC_ringbuf_submit_dynptr: |
1770 | return &bpf_ringbuf_submit_dynptr_proto; |
1771 | case BPF_FUNC_ringbuf_discard_dynptr: |
1772 | return &bpf_ringbuf_discard_dynptr_proto; |
1773 | case BPF_FUNC_dynptr_from_mem: |
1774 | return &bpf_dynptr_from_mem_proto; |
1775 | case BPF_FUNC_dynptr_read: |
1776 | return &bpf_dynptr_read_proto; |
1777 | case BPF_FUNC_dynptr_write: |
1778 | return &bpf_dynptr_write_proto; |
1779 | case BPF_FUNC_dynptr_data: |
1780 | return &bpf_dynptr_data_proto; |
1781 | #ifdef CONFIG_CGROUPS |
1782 | case BPF_FUNC_cgrp_storage_get: |
1783 | return &bpf_cgrp_storage_get_proto; |
1784 | case BPF_FUNC_cgrp_storage_delete: |
1785 | return &bpf_cgrp_storage_delete_proto; |
1786 | case BPF_FUNC_get_current_cgroup_id: |
1787 | return &bpf_get_current_cgroup_id_proto; |
1788 | case BPF_FUNC_get_current_ancestor_cgroup_id: |
1789 | return &bpf_get_current_ancestor_cgroup_id_proto; |
1790 | #endif |
1791 | default: |
1792 | break; |
1793 | } |
1794 | |
1795 | if (!bpf_token_capable(prog->aux->token, CAP_PERFMON)) |
1796 | return NULL; |
1797 | |
1798 | switch (func_id) { |
1799 | case BPF_FUNC_trace_printk: |
1800 | return bpf_get_trace_printk_proto(); |
1801 | case BPF_FUNC_get_current_task: |
1802 | return &bpf_get_current_task_proto; |
1803 | case BPF_FUNC_get_current_task_btf: |
1804 | return &bpf_get_current_task_btf_proto; |
1805 | case BPF_FUNC_probe_read_user: |
1806 | return &bpf_probe_read_user_proto; |
1807 | case BPF_FUNC_probe_read_kernel: |
1808 | return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? |
1809 | NULL : &bpf_probe_read_kernel_proto; |
1810 | case BPF_FUNC_probe_read_user_str: |
1811 | return &bpf_probe_read_user_str_proto; |
1812 | case BPF_FUNC_probe_read_kernel_str: |
1813 | return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? |
1814 | NULL : &bpf_probe_read_kernel_str_proto; |
1815 | case BPF_FUNC_snprintf_btf: |
1816 | return &bpf_snprintf_btf_proto; |
1817 | case BPF_FUNC_snprintf: |
1818 | return &bpf_snprintf_proto; |
1819 | case BPF_FUNC_task_pt_regs: |
1820 | return &bpf_task_pt_regs_proto; |
1821 | case BPF_FUNC_trace_vprintk: |
1822 | return bpf_get_trace_vprintk_proto(); |
1823 | default: |
1824 | return NULL; |
1825 | } |
1826 | } |
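
/*
 * Illustrative sketch (editorial example, not part of this file): a subsystem
 * exposing these base helpers from its verifier_ops->get_func_proto() callback
 * would typically handle its own helpers first and fall back to
 * bpf_base_func_proto() for everything else. The "foo" names below are
 * hypothetical; only bpf_base_func_proto() is real:
 *
 *	static const struct bpf_func_proto *
 *	foo_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_foo_helper:
 *			return &foo_helper_proto;
 *		default:
 *			return bpf_base_func_proto(func_id, prog);
 *		}
 *	}
 */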
1827 | |
1828 | void bpf_list_head_free(const struct btf_field *field, void *list_head, |
1829 | struct bpf_spin_lock *spin_lock) |
1830 | { |
1831 | struct list_head *head = list_head, *orig_head = list_head; |
1832 | |
1833 | BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head)); |
1834 | BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head)); |
1835 | |
1836 | /* Do the actual list draining outside the lock to not hold the lock for |
1837 | * too long, and also prevent deadlocks if tracing programs end up |
1838 | * executing on entry/exit of functions called inside the critical |
1839 | * section, and end up doing map ops that call bpf_list_head_free for |
1840 | * the same map value again. |
1841 | */ |
1842 | __bpf_spin_lock_irqsave(spin_lock); |
1843 | if (!head->next || list_empty(head)) |
1844 | goto unlock; |
1845 | head = head->next; |
1846 | unlock: |
1847 | INIT_LIST_HEAD(orig_head); |
1848 | __bpf_spin_unlock_irqrestore(spin_lock); |
1849 | |
1850 | while (head != orig_head) { |
1851 | void *obj = head; |
1852 | |
1853 | obj -= field->graph_root.node_offset; |
1854 | head = head->next; |
1855 | /* The contained type can also have resources, including a |
1856 | * bpf_list_head which needs to be freed. |
1857 | */ |
1858 | migrate_disable(); |
1859 | __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); |
1860 | migrate_enable(); |
1861 | } |
1862 | } |
1863 | |
1864 | /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are |
1865 | * 'rb_node *', so field name of rb_node within containing struct is not |
1866 | * needed. |
1867 | * |
1868 | * Since bpf_rb_tree's node type has a corresponding struct btf_field with |
1869 | * graph_root.node_offset, it's not necessary to know field name |
1870 | * or type of node struct |
1871 | */ |
1872 | #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \ |
1873 | for (pos = rb_first_postorder(root); \ |
1874 | pos && ({ n = rb_next_postorder(pos); 1; }); \ |
1875 | pos = n) |
1876 | |
1877 | void bpf_rb_root_free(const struct btf_field *field, void *rb_root, |
1878 | struct bpf_spin_lock *spin_lock) |
1879 | { |
1880 | struct rb_root_cached orig_root, *root = rb_root; |
1881 | struct rb_node *pos, *n; |
1882 | void *obj; |
1883 | |
1884 | BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root)); |
1885 | BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root)); |
1886 | |
1887 | __bpf_spin_lock_irqsave(spin_lock); |
1888 | orig_root = *root; |
1889 | *root = RB_ROOT_CACHED; |
1890 | __bpf_spin_unlock_irqrestore(spin_lock); |
1891 | |
1892 | bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) { |
1893 | obj = pos; |
1894 | obj -= field->graph_root.node_offset; |
1895 | |
1896 | |
1897 | migrate_disable(); |
1898 | __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); |
1899 | migrate_enable(); |
1900 | } |
1901 | } |
1902 | |
1903 | __bpf_kfunc_start_defs(); |
1904 | |
1905 | __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign) |
1906 | { |
1907 | struct btf_struct_meta *meta = meta__ign; |
1908 | u64 size = local_type_id__k; |
1909 | void *p; |
1910 | |
1911 | p = bpf_mem_alloc(&bpf_global_ma, size); |
1912 | if (!p) |
1913 | return NULL; |
1914 | if (meta) |
1915 | bpf_obj_init(meta->record, p); |
1916 | return p; |
1917 | } |
1918 | |
1919 | __bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign) |
1920 | { |
1921 | u64 size = local_type_id__k; |
1922 | |
1923 | /* The verifier has ensured that meta__ign must be NULL */ |
1924 | return bpf_mem_alloc(&bpf_global_percpu_ma, size); |
1925 | } |
1926 | |
1927 | /* Must be called under migrate_disable(), as required by bpf_mem_free */ |
1928 | void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu) |
1929 | { |
1930 | struct bpf_mem_alloc *ma; |
1931 | |
1932 | if (rec && rec->refcount_off >= 0 && |
1933 | !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) { |
1934 | /* Object is refcounted and refcount_dec didn't result in 0 |
1935 | * refcount. Return without freeing the object |
1936 | */ |
1937 | return; |
1938 | } |
1939 | |
1940 | if (rec) |
1941 | bpf_obj_free_fields(rec, p); |
1942 | |
1943 | if (percpu) |
1944 | ma = &bpf_global_percpu_ma; |
1945 | else |
1946 | ma = &bpf_global_ma; |
1947 | bpf_mem_free_rcu(ma, p); |
1948 | } |
1949 | |
1950 | __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign) |
1951 | { |
1952 | struct btf_struct_meta *meta = meta__ign; |
1953 | void *p = p__alloc; |
1954 | |
1955 | __bpf_obj_drop_impl(p, meta ? meta->record : NULL, false); |
1956 | } |
1957 | |
1958 | __bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign) |
1959 | { |
1960 | /* The verifier has ensured that meta__ign must be NULL */ |
1961 | bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc); |
1962 | } |
1963 | |
1964 | __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign) |
1965 | { |
1966 | struct btf_struct_meta *meta = meta__ign; |
1967 | struct bpf_refcount *ref; |
1968 | |
1969 | /* Could just cast directly to refcount_t *, but need some code using |
1970 | * bpf_refcount type so that it is emitted in vmlinux BTF |
1971 | */ |
1972 | ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off); |
1973 | if (!refcount_inc_not_zero((refcount_t *)ref)) |
1974 | return NULL; |
1975 | |
1976 | /* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null |
1977 | * in verifier.c |
1978 | */ |
1979 | return (void *)p__refcounted_kptr; |
1980 | } |
1981 | |
1982 | static int __bpf_list_add(struct bpf_list_node_kern *node, |
1983 | struct bpf_list_head *head, |
1984 | bool tail, struct btf_record *rec, u64 off) |
1985 | { |
1986 | struct list_head *n = &node->list_head, *h = (void *)head; |
1987 | |
1988 | /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't |
1989 | * called on its fields, so init here |
1990 | */ |
1991 | if (unlikely(!h->next)) |
1992 | INIT_LIST_HEAD(h); |
1993 | |
1994 | /* node->owner != NULL implies !list_empty(n), no need to separately |
1995 | * check the latter |
1996 | */ |
1997 | if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { |
1998 | /* Only called from BPF prog, no need to migrate_disable */ |
1999 | __bpf_obj_drop_impl((void *)n - off, rec, false); |
2000 | return -EINVAL; |
2001 | } |
2002 | |
2003 | tail ? list_add_tail(n, h) : list_add(n, h); |
2004 | WRITE_ONCE(node->owner, head); |
2005 | |
2006 | return 0; |
2007 | } |
2008 | |
2009 | __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head, |
2010 | struct bpf_list_node *node, |
2011 | void *meta__ign, u64 off) |
2012 | { |
2013 | struct bpf_list_node_kern *n = (void *)node; |
2014 | struct btf_struct_meta *meta = meta__ign; |
2015 | |
2016 | return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off); |
2017 | } |
2018 | |
2019 | __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head, |
2020 | struct bpf_list_node *node, |
2021 | void *meta__ign, u64 off) |
2022 | { |
2023 | struct bpf_list_node_kern *n = (void *)node; |
2024 | struct btf_struct_meta *meta = meta__ign; |
2025 | |
2026 | return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off); |
2027 | } |
2028 | |
2029 | static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail) |
2030 | { |
2031 | struct list_head *n, *h = (void *)head; |
2032 | struct bpf_list_node_kern *node; |
2033 | |
2034 | /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't |
2035 | * called on its fields, so init here |
2036 | */ |
2037 | if (unlikely(!h->next)) |
2038 | INIT_LIST_HEAD(h); |
2039 | if (list_empty(h)) |
2040 | return NULL; |
2041 | |
2042 | n = tail ? h->prev : h->next; |
2043 | node = container_of(n, struct bpf_list_node_kern, list_head); |
2044 | if (WARN_ON_ONCE(READ_ONCE(node->owner) != head)) |
2045 | return NULL; |
2046 | |
2047 | list_del_init(n); |
2048 | WRITE_ONCE(node->owner, NULL); |
2049 | return (struct bpf_list_node *)n; |
2050 | } |
2051 | |
2052 | __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) |
2053 | { |
2054 | return __bpf_list_del(head, false); |
2055 | } |
2056 | |
2057 | __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) |
2058 | { |
2059 | return __bpf_list_del(head, true); |
2060 | } |
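
/*
 * Illustrative sketch of BPF-program-side usage of the list kfuncs above,
 * loosely following the BPF selftests. bpf_obj_new(), private() and
 * __contains() are conveniences from the selftests' bpf_experimental.h and
 * are assumptions here, not defined in this file:
 *
 *	struct elem {
 *		long key;
 *		struct bpf_list_node node;
 *	};
 *
 *	private(A) struct bpf_spin_lock glock;
 *	private(A) struct bpf_list_head ghead __contains(elem, node);
 *
 *	struct elem *e = bpf_obj_new(typeof(*e));
 *
 *	if (!e)
 *		return 0;
 *	e->key = 42;
 *	bpf_spin_lock(&glock);
 *	bpf_list_push_front(&ghead, &e->node);
 *	bpf_spin_unlock(&glock);
 */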
2061 | |
2062 | __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, |
2063 | struct bpf_rb_node *node) |
2064 | { |
2065 | struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node; |
2066 | struct rb_root_cached *r = (struct rb_root_cached *)root; |
2067 | struct rb_node *n = &node_internal->rb_node; |
2068 | |
2069 | /* node_internal->owner != root implies either RB_EMPTY_NODE(n) or |
2070 | * n is owned by some other tree. No need to check RB_EMPTY_NODE(n) |
2071 | */ |
2072 | if (READ_ONCE(node_internal->owner) != root) |
2073 | return NULL; |
2074 | |
2075 | rb_erase_cached(n, r); |
2076 | RB_CLEAR_NODE(n); |
2077 | WRITE_ONCE(node_internal->owner, NULL); |
2078 | return (struct bpf_rb_node *)n; |
2079 | } |
2080 | |
2081 | /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF |
2082 | * program |
2083 | */ |
2084 | static int __bpf_rbtree_add(struct bpf_rb_root *root, |
2085 | struct bpf_rb_node_kern *node, |
2086 | void *less, struct btf_record *rec, u64 off) |
2087 | { |
2088 | struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node; |
2089 | struct rb_node *parent = NULL, *n = &node->rb_node; |
2090 | bpf_callback_t cb = (bpf_callback_t)less; |
2091 | bool leftmost = true; |
2092 | |
2093 | /* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately |
2094 | * check the latter |
2095 | */ |
2096 | if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { |
2097 | /* Only called from BPF prog, no need to migrate_disable */ |
2098 | __bpf_obj_drop_impl((void *)n - off, rec, false); |
2099 | return -EINVAL; |
2100 | } |
2101 | |
2102 | while (*link) { |
2103 | parent = *link; |
2104 | if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) { |
2105 | link = &parent->rb_left; |
2106 | } else { |
2107 | link = &parent->rb_right; |
2108 | leftmost = false; |
2109 | } |
2110 | } |
2111 | |
2112 | rb_link_node(n, parent, link); |
2113 | rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost); |
2114 | WRITE_ONCE(node->owner, root); |
2115 | return 0; |
2116 | } |
2117 | |
2118 | __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, |
2119 | bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), |
2120 | void *meta__ign, u64 off) |
2121 | { |
2122 | struct btf_struct_meta *meta = meta__ign; |
2123 | struct bpf_rb_node_kern *n = (void *)node; |
2124 | |
2125 | return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off); |
2126 | } |
2127 | |
2128 | __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) |
2129 | { |
2130 | struct rb_root_cached *r = (struct rb_root_cached *)root; |
2131 | |
2132 | return (struct bpf_rb_node *)rb_first_cached(r); |
2133 | } |
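
/*
 * Illustrative sketch of how a BPF program supplies the 'less' callback used
 * by __bpf_rbtree_add() above, mirroring the BPF selftests. The node layout,
 * the glock/groot declarations and the bpf_rbtree_add() wrapper from the
 * selftests' bpf_experimental.h are assumptions, not defined in this file:
 *
 *	struct node_data {
 *		long key;
 *		struct bpf_rb_node node;
 *	};
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na, *nb;
 *
 *		na = container_of(a, struct node_data, node);
 *		nb = container_of(b, struct node_data, node);
 *		return na->key < nb->key;
 *	}
 *
 *	bpf_spin_lock(&glock);
 *	bpf_rbtree_add(&groot, &n->node, less);
 *	bpf_spin_unlock(&glock);
 */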
2134 | |
2135 | /** |
2136 | * bpf_task_acquire - Acquire a reference to a task. A task acquired by this |
2137 | * kfunc which is not stored in a map as a kptr must be released by calling |
2138 | * bpf_task_release(). |
2139 | * @p: The task on which a reference is being acquired. |
2140 | */ |
2141 | __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p) |
2142 | { |
2143 | if (refcount_inc_not_zero(&p->rcu_users)) |
2144 | return p; |
2145 | return NULL; |
2146 | } |
2147 | |
2148 | /** |
2149 | * bpf_task_release - Release the reference acquired on a task. |
2150 | * @p: The task on which a reference is being released. |
2151 | */ |
2152 | __bpf_kfunc void bpf_task_release(struct task_struct *p) |
2153 | { |
2154 | put_task_struct_rcu_user(p); |
2155 | } |
2156 | |
2157 | __bpf_kfunc void bpf_task_release_dtor(void *p) |
2158 | { |
2159 | put_task_struct_rcu_user(p); |
2160 | } |
2161 | CFI_NOSEAL(bpf_task_release_dtor); |
2162 | |
2163 | #ifdef CONFIG_CGROUPS |
2164 | /** |
2165 | * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by |
2166 | * this kfunc which is not stored in a map as a kptr must be released by |
2167 | * calling bpf_cgroup_release(). |
2168 | * @cgrp: The cgroup on which a reference is being acquired. |
2169 | */ |
2170 | __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) |
2171 | { |
2172 | return cgroup_tryget(cgrp) ? cgrp : NULL; |
2173 | } |
2174 | |
2175 | /** |
2176 | * bpf_cgroup_release - Release the reference acquired on a cgroup. |
2177 | * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to |
2178 | * not be freed until the current grace period has ended, even if its refcount |
2179 | * drops to 0. |
2180 | * @cgrp: The cgroup on which a reference is being released. |
2181 | */ |
2182 | __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp) |
2183 | { |
2184 | cgroup_put(cgrp); |
2185 | } |
2186 | |
2187 | __bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp) |
2188 | { |
2189 | cgroup_put(cgrp); |
2190 | } |
2191 | CFI_NOSEAL(bpf_cgroup_release_dtor); |
2192 | |
2193 | /** |
2194 | * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor |
2195 | * array. A cgroup returned by this kfunc which is not subsequently stored in a |
2196 | * map must be released by calling bpf_cgroup_release(). |
2197 | * @cgrp: The cgroup for which we're performing a lookup. |
2198 | * @level: The level of ancestor to look up. |
2199 | */ |
2200 | __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) |
2201 | { |
2202 | struct cgroup *ancestor; |
2203 | |
2204 | if (level > cgrp->level || level < 0) |
2205 | return NULL; |
2206 | |
2207 | /* cgrp's refcnt could be 0 here, but ancestors can still be accessed */ |
2208 | ancestor = cgrp->ancestors[level]; |
2209 | if (!cgroup_tryget(ancestor)) |
2210 | return NULL; |
2211 | return ancestor; |
2212 | } |
2213 | |
2214 | /** |
2215 | * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this |
2216 | * kfunc which is not subsequently stored in a map must be released by calling |
2217 | * bpf_cgroup_release(). |
2218 | * @cgid: cgroup id. |
2219 | */ |
2220 | __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid) |
2221 | { |
2222 | struct cgroup *cgrp; |
2223 | |
2224 | cgrp = cgroup_get_from_id(cgid); |
2225 | if (IS_ERR(cgrp)) |
2226 | return NULL; |
2227 | return cgrp; |
2228 | } |
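
/*
 * Illustrative sketch of the acquire/release pairing for the cgroup kfuncs
 * (assumptions: 'cgid' and 'task' are values the BPF program already holds;
 * any cgroup not stored in a map must be released):
 *
 *	struct cgroup *cgrp = bpf_cgroup_from_id(cgid);
 *
 *	if (!cgrp)
 *		return 0;
 *	if (bpf_task_under_cgroup(task, cgrp))
 *		handle_match();
 *	bpf_cgroup_release(cgrp);
 */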
2229 | |
2230 | /** |
2231 | * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc, testing |
2232 | * a task's membership in a cgroup's ancestry. |
2233 | * @task: the task to be tested |
2234 | * @ancestor: possible ancestor of @task's cgroup |
2235 | * |
2236 | * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor. |
2237 | * It follows all the same rules as cgroup_is_descendant, and only applies |
2238 | * to the default hierarchy. |
2239 | */ |
2240 | __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task, |
2241 | struct cgroup *ancestor) |
2242 | { |
2243 | long ret; |
2244 | |
2245 | rcu_read_lock(); |
2246 | ret = task_under_cgroup_hierarchy(task, ancestor); |
2247 | rcu_read_unlock(); |
2248 | return ret; |
2249 | } |
2250 | |
2251 | /** |
2252 | * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a |
2253 | * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its |
2254 | * hierarchy ID. |
2255 | * @task: The target task |
2256 | * @hierarchy_id: The ID of a cgroup1 hierarchy |
2257 | * |
2258 | * On success, the cgroup is returned. On failure, NULL is returned. |
2259 | */ |
2260 | __bpf_kfunc struct cgroup * |
2261 | bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) |
2262 | { |
2263 | struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id); |
2264 | |
2265 | if (IS_ERR(cgrp)) |
2266 | return NULL; |
2267 | return cgrp; |
2268 | } |
2269 | #endif /* CONFIG_CGROUPS */ |
2270 | |
2271 | /** |
2272 | * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up |
2273 | * in the root pid namespace idr. If a task is returned, it must either be |
2274 | * stored in a map, or released with bpf_task_release(). |
2275 | * @pid: The pid of the task being looked up. |
2276 | */ |
2277 | __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid) |
2278 | { |
2279 | struct task_struct *p; |
2280 | |
2281 | rcu_read_lock(); |
2282 | p = find_task_by_pid_ns(pid, &init_pid_ns); |
2283 | if (p) |
2284 | p = bpf_task_acquire(p); |
2285 | rcu_read_unlock(); |
2286 | |
2287 | return p; |
2288 | } |
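
/*
 * Illustrative sketch of the acquire/release pairing for the task kfuncs
 * (assumptions: 'pid' is supplied by the BPF program, and bpf_printk() is the
 * usual libbpf convenience macro, not something defined in this file):
 *
 *	struct task_struct *t = bpf_task_from_pid(pid);
 *
 *	if (!t)
 *		return 0;
 *	bpf_printk("comm: %s", t->comm);
 *	bpf_task_release(t);
 */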
2289 | |
2290 | /** |
2291 | * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data. |
2292 | * @ptr: The dynptr whose data slice to retrieve |
2293 | * @offset: Offset into the dynptr |
2294 | * @buffer__opt: User-provided buffer to copy contents into. May be NULL |
2295 | * @buffer__szk: Size (in bytes) of the buffer if present. This is the |
2296 | * length of the requested slice. This must be a constant. |
2297 | * |
2298 | * For non-skb and non-xdp type dynptrs, there is no difference between |
2299 | * bpf_dynptr_slice and bpf_dynptr_data. |
2300 | * |
2301 | * If buffer__opt is NULL, the call will fail if buffer__opt was needed. |
2302 | * |
2303 | * If the intention is to write to the data slice, please use |
2304 | * bpf_dynptr_slice_rdwr. |
2305 | * |
2306 | * The user must check that the returned pointer is not null before using it. |
2307 | * |
2308 | * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice |
2309 | * does not change the underlying packet data pointers, so a call to |
2310 | * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in |
2311 | * the bpf program. |
2312 | * |
2313 | * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only |
2314 | * data slice (can be either direct pointer to the data or a pointer to the user |
2315 | * provided buffer, with its contents containing the data, if unable to obtain |
2316 | * direct pointer) |
2317 | */ |
2318 | __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset, |
2319 | void *buffer__opt, u32 buffer__szk) |
2320 | { |
2321 | enum bpf_dynptr_type type; |
2322 | u32 len = buffer__szk; |
2323 | int err; |
2324 | |
2325 | if (!ptr->data) |
2326 | return NULL; |
2327 | |
2328 | err = bpf_dynptr_check_off_len(ptr, offset, len); |
2329 | if (err) |
2330 | return NULL; |
2331 | |
2332 | type = bpf_dynptr_get_type(ptr); |
2333 | |
2334 | switch (type) { |
2335 | case BPF_DYNPTR_TYPE_LOCAL: |
2336 | case BPF_DYNPTR_TYPE_RINGBUF: |
2337 | return ptr->data + ptr->offset + offset; |
2338 | case BPF_DYNPTR_TYPE_SKB: |
2339 | if (buffer__opt) |
2340 | return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt); |
2341 | else |
2342 | return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len); |
2343 | case BPF_DYNPTR_TYPE_XDP: |
2344 | { |
2345 | void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len); |
2346 | if (!IS_ERR_OR_NULL(xdp_ptr)) |
2347 | return xdp_ptr; |
2348 | |
2349 | if (!buffer__opt) |
2350 | return NULL; |
2351 | bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false); |
2352 | return buffer__opt; |
2353 | } |
2354 | default: |
2355 | WARN_ONCE(true, "unknown dynptr type %d\n", type); |
2356 | return NULL; |
2357 | } |
2358 | } |
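
/*
 * Illustrative sketch of reading a header through bpf_dynptr_slice() from an
 * skb dynptr (assumptions: 'ptr' was initialized with bpf_dynptr_from_skb(),
 * and bpf_htons() is the usual libbpf endianness macro):
 *
 *	struct ethhdr *eth, buf;
 *
 *	eth = bpf_dynptr_slice(&ptr, 0, &buf, sizeof(buf));
 *	if (!eth)
 *		return TC_ACT_SHOT;
 *	if (eth->h_proto == bpf_htons(ETH_P_IP))
 *		handle_ipv4();
 */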
2359 | |
2360 | /** |
2361 | * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data. |
2362 | * @ptr: The dynptr whose data slice to retrieve |
2363 | * @offset: Offset into the dynptr |
2364 | * @buffer__opt: User-provided buffer to copy contents into. May be NULL |
2365 | * @buffer__szk: Size (in bytes) of the buffer if present. This is the |
2366 | * length of the requested slice. This must be a constant. |
2367 | * |
2368 | * For non-skb and non-xdp type dynptrs, there is no difference between |
2369 | * bpf_dynptr_slice and bpf_dynptr_data. |
2370 | * |
2371 | * If buffer__opt is NULL, the call will fail if buffer__opt was needed. |
2372 | * |
2373 | * The returned pointer is writable and may point either directly to the dynptr |
2374 | * data at the requested offset or to the buffer if a direct data pointer cannot |
2375 | * be obtained (for example, when the requested slice is in the paged area of an |
2376 | * skb packet). In the case where the returned pointer is to the buffer, the user |
2377 | * is responsible for persisting writes through calling bpf_dynptr_write(). This |
2378 | * usually looks something like this pattern: |
2379 | * |
2380 | * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer)); |
2381 | * if (!eth) |
2382 | * return TC_ACT_SHOT; |
2383 | * |
2384 | * // mutate eth header // |
2385 | * |
2386 | * if (eth == buffer) |
2387 | * bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0); |
2388 | * |
2389 | * Please note that, as in the example above, the user must check that the |
2390 | * returned pointer is not null before using it. |
2391 | * |
2392 | * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr |
2393 | * does not change the underlying packet data pointers, so a call to |
2394 | * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in |
2395 | * the bpf program. |
2396 | * |
2397 | * Return: NULL if the call failed (eg invalid dynptr), pointer to a |
2398 | * data slice (can be either direct pointer to the data or a pointer to the user |
2399 | * provided buffer, with its contents containing the data, if unable to obtain |
2400 | * direct pointer) |
2401 | */ |
2402 | __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 offset, |
2403 | void *buffer__opt, u32 buffer__szk) |
2404 | { |
2405 | if (!ptr->data || __bpf_dynptr_is_rdonly(ptr)) |
2406 | return NULL; |
2407 | |
2408 | /* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice. |
2409 | * |
2410 | * For skb-type dynptrs, it is safe to write into the returned pointer |
2411 | * if the bpf program allows skb data writes. There are two possibilities |
2412 | * that may occur when calling bpf_dynptr_slice_rdwr: |
2413 | * |
2414 | * 1) The requested slice is in the head of the skb. In this case, the |
2415 | * returned pointer is directly to skb data, and if the skb is cloned, the |
2416 | * verifier will have uncloned it (see bpf_unclone_prologue()) already. |
2417 | * The pointer can be directly written into. |
2418 | * |
2419 | * 2) Some portion of the requested slice is in the paged buffer area. |
2420 | * In this case, the requested data will be copied out into the buffer |
2421 | * and the returned pointer will be a pointer to the buffer. The skb |
2422 | * will not be pulled. To persist the write, the user will need to call |
2423 | * bpf_dynptr_write(), which will pull the skb and commit the write. |
2424 | * |
2425 | * Similarly for xdp programs, if the requested slice is not across xdp |
2426 | * fragments, then a direct pointer will be returned, otherwise the data |
2427 | * will be copied out into the buffer and the user will need to call |
2428 | * bpf_dynptr_write() to commit changes. |
2429 | */ |
2430 | return bpf_dynptr_slice(ptr, offset, buffer__opt, buffer__szk); |
2431 | } |
2432 | |
2433 | __bpf_kfunc int bpf_dynptr_adjust(struct bpf_dynptr_kern *ptr, u32 start, u32 end) |
2434 | { |
2435 | u32 size; |
2436 | |
2437 | if (!ptr->data || start > end) |
2438 | return -EINVAL; |
2439 | |
2440 | size = __bpf_dynptr_size(ptr); |
2441 | |
2442 | if (start > size || end > size) |
2443 | return -ERANGE; |
2444 | |
2445 | ptr->offset += start; |
2446 | bpf_dynptr_set_size(ptr, end - start); |
2447 | |
2448 | return 0; |
2449 | } |
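
/*
 * Illustrative sketch: trimming a dynptr so that later reads skip a
 * fixed-size header (assumption: 'ptr' is an already-initialized dynptr
 * held by the BPF program):
 *
 *	if (bpf_dynptr_adjust(&ptr, sizeof(struct ethhdr), bpf_dynptr_size(&ptr)))
 *		return 0;
 *	offset 0 of 'ptr' now refers to the first byte after the Ethernet header
 */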
2450 | |
2451 | __bpf_kfunc bool bpf_dynptr_is_null(struct bpf_dynptr_kern *ptr) |
2452 | { |
2453 | return !ptr->data; |
2454 | } |
2455 | |
2456 | __bpf_kfunc bool bpf_dynptr_is_rdonly(struct bpf_dynptr_kern *ptr) |
2457 | { |
2458 | if (!ptr->data) |
2459 | return false; |
2460 | |
2461 | return __bpf_dynptr_is_rdonly(ptr); |
2462 | } |
2463 | |
2464 | __bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr_kern *ptr) |
2465 | { |
2466 | if (!ptr->data) |
2467 | return -EINVAL; |
2468 | |
2469 | return __bpf_dynptr_size(ptr); |
2470 | } |
2471 | |
2472 | __bpf_kfunc int bpf_dynptr_clone(struct bpf_dynptr_kern *ptr, |
2473 | struct bpf_dynptr_kern *clone__uninit) |
2474 | { |
2475 | if (!ptr->data) { |
2476 | bpf_dynptr_set_null(clone__uninit); |
2477 | return -EINVAL; |
2478 | } |
2479 | |
2480 | *clone__uninit = *ptr; |
2481 | |
2482 | return 0; |
2483 | } |
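
/*
 * Illustrative sketch: bpf_dynptr_clone() copies the view, so the clone can
 * be adjusted independently while the original keeps covering the full range
 * (assumptions: 'ptr' is an already-initialized dynptr and 'hdr_len' is a
 * value the program computed):
 *
 *	struct bpf_dynptr payload;
 *
 *	if (bpf_dynptr_clone(&ptr, &payload))
 *		return 0;
 *	bpf_dynptr_adjust(&payload, hdr_len, bpf_dynptr_size(&payload));
 *	reads via &payload skip the header; &ptr is unchanged
 */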
2484 | |
2485 | __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj) |
2486 | { |
2487 | return obj; |
2488 | } |
2489 | |
2490 | __bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k) |
2491 | { |
2492 | return (void *)obj__ign; |
2493 | } |
2494 | |
2495 | __bpf_kfunc void bpf_rcu_read_lock(void) |
2496 | { |
2497 | rcu_read_lock(); |
2498 | } |
2499 | |
2500 | __bpf_kfunc void bpf_rcu_read_unlock(void) |
2501 | { |
2502 | rcu_read_unlock(); |
2503 | } |
2504 | |
2505 | struct bpf_throw_ctx { |
2506 | struct bpf_prog_aux *aux; |
2507 | u64 sp; |
2508 | u64 bp; |
2509 | int cnt; |
2510 | }; |
2511 | |
2512 | static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp) |
2513 | { |
2514 | struct bpf_throw_ctx *ctx = cookie; |
2515 | struct bpf_prog *prog; |
2516 | |
2517 | if (!is_bpf_text_address(ip)) |
2518 | return !ctx->cnt; |
2519 | prog = bpf_prog_ksym_find(ip); |
2520 | ctx->cnt++; |
2521 | if (bpf_is_subprog(prog)) |
2522 | return true; |
2523 | ctx->aux = prog->aux; |
2524 | ctx->sp = sp; |
2525 | ctx->bp = bp; |
2526 | return false; |
2527 | } |
2528 | |
2529 | __bpf_kfunc void bpf_throw(u64 cookie) |
2530 | { |
2531 | struct bpf_throw_ctx ctx = {}; |
2532 | |
2533 | arch_bpf_stack_walk(bpf_stack_walker, &ctx); |
2534 | WARN_ON_ONCE(!ctx.aux); |
2535 | if (ctx.aux) |
2536 | WARN_ON_ONCE(!ctx.aux->exception_boundary); |
2537 | WARN_ON_ONCE(!ctx.bp); |
2538 | WARN_ON_ONCE(!ctx.cnt); |
2539 | /* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning |
2540 | * deeper stack depths than ctx.sp as we do not return from bpf_throw, |
2541 | * which skips compiler generated instrumentation to do the same. |
2542 | */ |
2543 | kasan_unpoison_task_stack_below((void *)(long)ctx.sp); |
2544 | ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0); |
2545 | WARN(1, "A call to BPF exception callback should never return\n"); |
2546 | } |
2547 | |
2548 | __bpf_kfunc_end_defs(); |
2549 | |
2550 | BTF_KFUNCS_START(generic_btf_ids) |
2551 | #ifdef CONFIG_CRASH_DUMP |
2552 | BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE) |
2553 | #endif |
2554 | BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) |
2555 | BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) |
2556 | BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE) |
2557 | BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE) |
2558 | BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU) |
2559 | BTF_ID_FLAGS(func, bpf_list_push_front_impl) |
2560 | BTF_ID_FLAGS(func, bpf_list_push_back_impl) |
2561 | BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL) |
2562 | BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL) |
2563 | BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) |
2564 | BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE) |
2565 | BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL) |
2566 | BTF_ID_FLAGS(func, bpf_rbtree_add_impl) |
2567 | BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL) |
2568 | |
2569 | #ifdef CONFIG_CGROUPS |
2570 | BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) |
2571 | BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE) |
2572 | BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL) |
2573 | BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL) |
2574 | BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU) |
2575 | BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL) |
2576 | #endif |
2577 | BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL) |
2578 | BTF_ID_FLAGS(func, bpf_throw) |
2579 | BTF_KFUNCS_END(generic_btf_ids) |
2580 | |
2581 | static const struct btf_kfunc_id_set generic_kfunc_set = { |
2582 | .owner = THIS_MODULE, |
2583 | .set = &generic_btf_ids, |
2584 | }; |
2585 | |
2586 | |
2587 | BTF_ID_LIST(generic_dtor_ids) |
2588 | BTF_ID(struct, task_struct) |
2589 | BTF_ID(func, bpf_task_release_dtor) |
2590 | #ifdef CONFIG_CGROUPS |
2591 | BTF_ID(struct, cgroup) |
2592 | BTF_ID(func, bpf_cgroup_release_dtor) |
2593 | #endif |
2594 | |
2595 | BTF_KFUNCS_START(common_btf_ids) |
2596 | BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx) |
2597 | BTF_ID_FLAGS(func, bpf_rdonly_cast) |
2598 | BTF_ID_FLAGS(func, bpf_rcu_read_lock) |
2599 | BTF_ID_FLAGS(func, bpf_rcu_read_unlock) |
2600 | BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL) |
2601 | BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL) |
2602 | BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW) |
2603 | BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL) |
2604 | BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY) |
2605 | BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU) |
2606 | BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL) |
2607 | BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY) |
2608 | #ifdef CONFIG_CGROUPS |
2609 | BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS) |
2610 | BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL) |
2611 | BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY) |
2612 | BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) |
2613 | BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL) |
2614 | BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY) |
2615 | #endif |
2616 | BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) |
2617 | BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL) |
2618 | BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY) |
2619 | BTF_ID_FLAGS(func, bpf_dynptr_adjust) |
2620 | BTF_ID_FLAGS(func, bpf_dynptr_is_null) |
2621 | BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly) |
2622 | BTF_ID_FLAGS(func, bpf_dynptr_size) |
2623 | BTF_ID_FLAGS(func, bpf_dynptr_clone) |
2624 | BTF_KFUNCS_END(common_btf_ids) |
2625 | |
2626 | static const struct btf_kfunc_id_set common_kfunc_set = { |
2627 | .owner = THIS_MODULE, |
2628 | .set = &common_btf_ids, |
2629 | }; |
2630 | |
2631 | static int __init kfunc_init(void) |
2632 | { |
2633 | int ret; |
2634 | const struct btf_id_dtor_kfunc generic_dtors[] = { |
2635 | { |
2636 | .btf_id = generic_dtor_ids[0], |
2637 | .kfunc_btf_id = generic_dtor_ids[1] |
2638 | }, |
2639 | #ifdef CONFIG_CGROUPS |
2640 | { |
2641 | .btf_id = generic_dtor_ids[2], |
2642 | .kfunc_btf_id = generic_dtor_ids[3] |
2643 | }, |
2644 | #endif |
2645 | }; |
2646 | |
2647 | ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set); |
2648 | ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set); |
2649 | ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set); |
2650 | ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set); |
2651 | ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors, |
2652 | ARRAY_SIZE(generic_dtors), |
2653 | THIS_MODULE); |
2654 | return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set); |
2655 | } |
2656 | |
2657 | late_initcall(kfunc_init); |
2658 | |
2659 | /* Get a pointer to dynptr data up to len bytes for read only access. If |
2660 | * the dynptr doesn't have continuous data up to len bytes, return NULL. |
2661 | */ |
2662 | const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len) |
2663 | { |
2664 | return bpf_dynptr_slice(ptr, 0, NULL, len); |
2665 | } |
2666 | |
2667 | /* Get a pointer to dynptr data up to len bytes for read write access. If |
2668 | * the dynptr doesn't have continuous data up to len bytes, or the dynptr |
2669 | * is read only, return NULL. |
2670 | */ |
2671 | void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len) |
2672 | { |
2673 | if (__bpf_dynptr_is_rdonly(ptr)) |
2674 | return NULL; |
2675 | return (void *)__bpf_dynptr_data(ptr, len); |
2676 | } |
2677 | |