// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>
#include <linux/fprobe.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/key.h>
#include <linux/verification.h>
#include <linux/namei.h>
#include <linux/fileattr.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#define MAX_UPROBE_MULTI_CNT (1U << 20)
#define MAX_KPROBE_MULTI_CNT (1U << 20)

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		rcu_read_lock();
		bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
		rcu_read_unlock();
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the prog_array fetched by bpf_prog_array_valid() was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL, we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array with the risk of missing
	 * events if it was updated in between that check and the
	 * rcu_dereference(), which is an accepted risk.
	 */
	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
				 ctx, bpf_prog_run);
	rcu_read_unlock();

out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}
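
/*
 * A minimal BPF-side sketch of the return-value contract described above
 * (not part of this file; assumes libbpf's bpf_helpers.h, and the attach
 * point and pid value are hypothetical):
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int trace_open(struct pt_regs *ctx)
 *	{
 *		if (bpf_get_current_pid_tgid() >> 32 != 1234)
 *			return 0;	// filter out: no event is stored
 *		return 1;		// store the kprobe event in the ring buffer
 *	}
 */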

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func = bpf_override_return,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};
#endif

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func = bpf_probe_read_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
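
/*
 * A minimal BPF-side usage sketch (not part of this file; assumes libbpf's
 * bpf_helpers.h and bpf_tracing.h; the attach point is only an example):
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int probe_open(struct pt_regs *ctx)
 *	{
 *		const char *uptr = (const char *)PT_REGS_PARM2(ctx);
 *		char fname[64];
 *
 *		// The read may fault; on error the helper zero-fills fname.
 *		if (bpf_probe_read_user(fname, sizeof(fname), uptr) < 0)
 *			return 0;
 *		return 0;
 *	}
 */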

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If user takes `dst`
	 * and keys a hash map with it, then semantically identical strings can
	 * occupy multiple entries in the map.
	 */
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func = bpf_probe_read_user_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func = bpf_probe_read_kernel,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func = bpf_probe_read_kernel_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func = bpf_probe_read_compat,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func = bpf_probe_read_compat_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func = bpf_probe_write_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

#define MAX_TRACE_PRINTK_VARARGS 3
#define BPF_TRACE_PRINTK_SIZE 1024

BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
	struct bpf_bprintf_data data = {
		.get_bin_args = true,
		.get_buf = true,
	};
	int ret;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args,
				  MAX_TRACE_PRINTK_VARARGS, &data);
	if (ret < 0)
		return ret;

	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);

	trace_bpf_trace_printk(data.buf);

	bpf_bprintf_cleanup(&data);

	return ret;
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func = bpf_trace_printk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
};

static void __set_printk_clr_event(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events. By loading a program
	 * calling bpf_trace_printk() however the user has expressed
	 * the intent to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");
}

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_printk_proto;
}
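
/*
 * A minimal BPF-side usage sketch (not part of this file; assumes libbpf's
 * bpf_helpers.h, whose bpf_printk() macro wraps this helper; the tracepoint
 * chosen is only an example):
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int log_execve(void *ctx)
 *	{
 *		// Output appears in /sys/kernel/tracing/trace_pipe.
 *		bpf_printk("execve by pid %d",
 *			   (int)(bpf_get_current_pid_tgid() >> 32));
 *		return 0;
 *	}
 */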

BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
	   u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args = true,
		.get_buf = true,
	};
	int ret, num_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
	if (ret < 0)
		return ret;

	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);

	trace_bpf_trace_printk(data.buf);

	bpf_bprintf_cleanup(&data);

	return ret;
}

static const struct bpf_func_proto bpf_trace_vprintk_proto = {
	.func = bpf_trace_vprintk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg4_type = ARG_CONST_SIZE_OR_ZERO,
};

const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_vprintk_proto;
}

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, args, u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args = true,
	};
	int err, num_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
	if (err < 0)
		return err;

	seq_bprintf(m, fmt, data.bin_args);

	bpf_bprintf_cleanup(&data);

	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func = bpf_seq_printf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE,
	.arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
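
/*
 * A minimal BPF iterator sketch using this helper (not part of this file;
 * assumes vmlinux.h plus libbpf's bpf_helpers.h and bpf_tracing.h, whose
 * BPF_SEQ_PRINTF() macro packs the vararg array for bpf_seq_printf()):
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(seq, "%d %s\n", task->pid, task->comm);
 *		return 0;
 *	}
 */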

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func = bpf_seq_write,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func = bpf_seq_printf_btf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_seq_file_ids[0],
	.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func = bpf_perf_event_read,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func = bpf_perf_event_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds;
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int nest_level, err;

	preempt_disable();
	sds = this_cpu_ptr(&bpf_trace_sds);
	nest_level = this_cpu_inc_return(bpf_trace_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	perf_sample_save_raw_data(sd, &raw);

	err = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_trace_nest_level);
	preempt_enable();
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func = bpf_perf_event_output,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
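
/*
 * A minimal BPF-side usage sketch (not part of this file; assumes vmlinux.h
 * and libbpf's bpf_helpers.h; the event struct and map name are
 * hypothetical):
 *
 *	struct event { u32 pid; };
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, sizeof(u32));
 *	} events SEC(".maps");
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int emit(struct pt_regs *ctx)
 *	{
 *		struct event e = { .pid = bpf_get_current_pid_tgid() >> 32 };
 *
 *		// BPF_F_CURRENT_CPU selects this CPU's ring in the map.
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 */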

static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct perf_raw_frag frag = {
		.copy = ctx_copy,
		.size = ctx_size,
		.data = ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next = ctx_size ? &frag : NULL,
			},
			.size = meta_size,
			.data = meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	int nest_level;
	u64 ret;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	perf_sample_save_raw_data(sd, &raw);

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	preempt_enable();
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func = bpf_get_current_task,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_task_btf)
{
	return (unsigned long) current;
}

const struct bpf_func_proto bpf_get_current_task_btf_proto = {
	.func = bpf_get_current_task_btf,
	.gpl_only = true,
	.ret_type = RET_PTR_TO_BTF_ID_TRUSTED,
	.ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
{
	return (unsigned long) task_pt_regs(task);
}

BTF_ID_LIST(bpf_task_pt_regs_ids)
BTF_ID(struct, pt_regs)

const struct bpf_func_proto bpf_task_pt_regs_proto = {
	.func = bpf_task_pt_regs,
	.gpl_only = true,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.ret_type = RET_PTR_TO_BTF_ID,
	.ret_btf_id = &bpf_task_pt_regs_ids[0],
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func = bpf_current_task_under_cgroup,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
	put_task_struct(work->task);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;
	/* Task should not be pid=1 to avoid kernel panic. */
	if (unlikely(is_global_init(current)))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (irq_work_is_busy(&work->irq_work))
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = get_task_struct(current);
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func = bpf_send_signal,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func = bpf_send_signal_thread,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};
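
/*
 * A minimal BPF-side usage sketch (not part of this file; assumes libbpf's
 * bpf_helpers.h; the attach point and signal are only an example):
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int signal_on_open(struct pt_regs *ctx)
 *	{
 *		// Deliver SIGUSR1 (10) to the current thread group;
 *		// bpf_send_signal_thread() would target only this thread.
 *		bpf_send_signal(10);
 *		return 0;
 *	}
 */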

BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	struct path copy;
	long len;
	char *p;

	if (!sz)
		return 0;

	/*
	 * The path pointer is verified as trusted and safe to use,
	 * but let's double check it's valid anyway to workaround
	 * potentially broken verifier.
	 */
	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
	if (len < 0)
		return len;

	p = d_path(&copy, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_TRACING &&
	    prog->expected_attach_type == BPF_TRACE_ITER)
		return true;

	if (prog->type == BPF_PROG_TYPE_LSM)
		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);

	return btf_id_set_contains(&btf_allowlist_d_path,
				   prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func = bpf_d_path,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &bpf_d_path_btf_ids[0],
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.allowed = bpf_d_path_allowed,
};
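
/*
 * A minimal BPF-side usage sketch, attached to one of the allowlisted hooks
 * above (not part of this file; assumes libbpf's bpf_helpers.h and
 * bpf_tracing.h):
 *
 *	SEC("fentry/vfs_truncate")
 *	int BPF_PROG(on_truncate, const struct path *path)
 *	{
 *		char buf[256];
 *
 *		if (bpf_d_path((struct path *)path, buf, sizeof(buf)) > 0)
 *			bpf_printk("truncate: %s", buf);
 *		return 0;
 *	}
 */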

#define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \
		   BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func = bpf_snprintf_btf,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type = ARG_CONST_SIZE,
	.arg5_type = ARG_ANYTHING,
};
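
/*
 * A minimal BPF-side usage sketch (not part of this file; assumes vmlinux.h
 * plus libbpf's bpf_helpers.h and bpf_core_read.h, whose
 * bpf_core_type_id_kernel() macro resolves the BTF type id):
 *
 *	SEC("iter/task")
 *	int dump_task_btf(struct bpf_iter__task *ctx)
 *	{
 *		struct btf_ptr p = {
 *			.ptr = ctx->task,
 *			.type_id = bpf_core_type_id_kernel(struct task_struct),
 *		};
 *		static char out[1024];
 *
 *		bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), 0);
 *		return 0;
 *	}
 */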

BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-2];
}

static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
	.func = bpf_get_func_ip_tracing,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

#ifdef CONFIG_X86_KERNEL_IBT
static unsigned long get_entry_ip(unsigned long fentry_ip)
{
	u32 instr;

	/* Being extra safe in here in case entry ip is on the page-edge. */
	if (get_kernel_nofault(instr, (u32 *)fentry_ip - 1))
		return fentry_ip;
	if (is_endbr(instr))
		fentry_ip -= ENDBR_INSN_SIZE;
	return fentry_ip;
}
#else
#define get_entry_ip(fentry_ip) fentry_ip
#endif

BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
{
	struct bpf_trace_run_ctx *run_ctx __maybe_unused;
	struct kprobe *kp;

#ifdef CONFIG_UPROBES
	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	if (run_ctx->is_uprobe)
		return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
#endif

	kp = kprobe_running();

	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
		return 0;

	return get_entry_ip((uintptr_t)kp->addr);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
	.func = bpf_get_func_ip_kprobe,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
	.func = bpf_get_func_ip_kprobe_multi,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
	.func = bpf_get_attach_cookie_kprobe_multi,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
{
	return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
	.func = bpf_get_func_ip_uprobe_multi,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
{
	return bpf_uprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
	.func = bpf_get_attach_cookie_uprobe_multi,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
	.func = bpf_get_attach_cookie_trace,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
{
	return ctx->event->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
	.func = bpf_get_attach_cookie_pe,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
	.func = bpf_get_attach_cookie_tracing,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	u32 entry_cnt = size / br_entry_size;

	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);

	if (unlikely(flags))
		return -EINVAL;

	if (!entry_cnt)
		return -ENOENT;

	return entry_cnt * br_entry_size;
#endif
}

static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
	.func = bpf_get_branch_snapshot,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	if ((u64) n >= nr_args)
		return -EINVAL;
	*value = ((u64 *)ctx)[n];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_arg_proto = {
	.func = get_func_arg,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_LONG,
};

BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	*value = ((u64 *)ctx)[nr_args];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_ret_proto = {
	.func = get_func_ret,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_LONG,
};

BPF_CALL_1(get_func_arg_cnt, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-1];
}

static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
	.func = get_func_arg_cnt,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

#ifdef CONFIG_KEYS
__bpf_kfunc_start_defs();

/**
 * bpf_lookup_user_key - lookup a key by its serial
 * @serial: key handle serial number
 * @flags: lookup-specific flags
 *
 * Search a key with a given *serial* and the provided *flags*.
 * If found, increment the reference count of the key by one, and
 * return it in the bpf_key structure.
 *
 * The bpf_key structure must be passed to bpf_key_put() when done
 * with it, so that the key reference count is decremented and the
 * bpf_key structure is freed.
 *
 * Permission checks are deferred to the time the key is used by
 * one of the available key-specific kfuncs.
 *
 * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
 * special keyring (e.g. session keyring), if it doesn't yet exist.
 * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
 * for the key construction, and to retrieve uninstantiated keys (keys
 * without data attached to them).
 *
 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
 * NULL pointer otherwise.
 */
__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
{
	key_ref_t key_ref;
	struct bpf_key *bkey;

	if (flags & ~KEY_LOOKUP_ALL)
		return NULL;

	/*
	 * Permission check is deferred until the key is used, as the
	 * intent of the caller is unknown here.
	 */
	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
	if (IS_ERR(key_ref))
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
	if (!bkey) {
		key_put(key_ref_to_ptr(key_ref));
		return NULL;
	}

	bkey->key = key_ref_to_ptr(key_ref);
	bkey->has_ref = true;

	return bkey;
}
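
/*
 * A minimal BPF-side usage sketch (not part of this file; the kfuncs must be
 * declared extern with __ksym in the BPF program, and the serial value is
 * hypothetical):
 *
 *	extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
 *	extern void bpf_key_put(struct bpf_key *bkey) __ksym;
 *
 *	SEC("lsm.s/bpf")
 *	int BPF_PROG(check, int cmd)
 *	{
 *		struct bpf_key *bkey = bpf_lookup_user_key(0x12345678, 0);
 *
 *		if (!bkey)
 *			return 0;
 *		bpf_key_put(bkey);	// always release the acquired reference
 *		return 0;
 *	}
 */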

/**
 * bpf_lookup_system_key - lookup a key by a system-defined ID
 * @id: key ID
 *
 * Obtain a bpf_key structure with a key pointer set to the passed key ID.
 * The key pointer is marked as invalid, to prevent bpf_key_put() from
 * attempting to decrement the key reference count on that pointer. The key
 * pointer set in such way is currently understood only by
 * verify_pkcs7_signature().
 *
 * Set *id* to one of the values defined in include/linux/verification.h:
 * 0 for the primary keyring (immutable keyring of system keys);
 * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
 * (where keys can be added only if they are vouched for by existing keys
 * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
 * keyring (primarily used by the integrity subsystem to verify a kexec'ed
 * kernel image and, possibly, the initramfs signature).
 *
 * Return: a bpf_key pointer with an invalid key pointer set from the
 * pre-determined ID on success, a NULL pointer otherwise
 */
__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
{
	struct bpf_key *bkey;

	if (system_keyring_id_check(id) < 0)
		return NULL;

	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
	if (!bkey)
		return NULL;

	bkey->key = (struct key *)(unsigned long)id;
	bkey->has_ref = false;

	return bkey;
}

/**
 * bpf_key_put - decrement key reference count if key is valid and free bpf_key
 * @bkey: bpf_key structure
 *
 * Decrement the reference count of the key inside *bkey*, if the pointer
 * is valid, and free *bkey*.
 */
__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
{
	if (bkey->has_ref)
		key_put(bkey->key);

	kfree(bkey);
}

#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
/**
 * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
 * @data_ptr: data to verify
 * @sig_ptr: signature of the data
 * @trusted_keyring: keyring with keys trusted for signature verification
 *
 * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
 * with keys in a keyring referenced by *trusted_keyring*.
 *
 * Return: 0 on success, a negative value on error.
 */
__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
					   struct bpf_dynptr_kern *sig_ptr,
					   struct bpf_key *trusted_keyring)
{
	const void *data, *sig;
	u32 data_len, sig_len;
	int ret;

	if (trusted_keyring->has_ref) {
		/*
		 * Do the permission check deferred in bpf_lookup_user_key().
		 * See bpf_lookup_user_key() for more details.
		 *
		 * A call to key_task_permission() here would be redundant, as
		 * it is already done by keyring_search() called by
		 * find_asymmetric_key().
		 */
		ret = key_validate(trusted_keyring->key);
		if (ret < 0)
			return ret;
	}

	data_len = __bpf_dynptr_size(data_ptr);
	data = __bpf_dynptr_data(data_ptr, data_len);
	sig_len = __bpf_dynptr_size(sig_ptr);
	sig = __bpf_dynptr_data(sig_ptr, sig_len);

	return verify_pkcs7_signature(data, data_len, sig, sig_len,
				      trusted_keyring->key,
				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
				      NULL);
}
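
/*
 * A minimal BPF-side usage sketch (not part of this file; kfunc externs as in
 * the bpf_lookup_user_key() sketch above; dynptr setup is elided):
 *
 *	extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr,
 *					      struct bpf_dynptr *sig_ptr,
 *					      struct bpf_key *trusted_keyring) __ksym;
 *
 *	// With data_ptr/sig_ptr pointing at the payload and its PKCS#7
 *	// signature, and bkey obtained from bpf_lookup_system_key(0):
 *	//	err = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, bkey);
 */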
#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(key_sig_kfunc_set)
BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
#endif
BTF_KFUNCS_END(key_sig_kfunc_set)

static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &key_sig_kfunc_set,
};

static int __init bpf_key_sig_kfuncs_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
					 &bpf_key_sig_kfunc_set);
}

late_initcall(bpf_key_sig_kfuncs_init);
#endif /* CONFIG_KEYS */

/* filesystem kfuncs */
__bpf_kfunc_start_defs();

/**
 * bpf_get_file_xattr - get xattr of a file
 * @file: file to get xattr from
 * @name__str: name of the xattr
 * @value_ptr: output buffer of the xattr value
 *
 * Get xattr *name__str* of *file* and store the output in *value_ptr*.
 *
 * For security reasons, only *name__str* with prefix "user." is allowed.
 *
 * Return: 0 on success, a negative value on error.
 */
__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
				   struct bpf_dynptr_kern *value_ptr)
{
	struct dentry *dentry;
	u32 value_len;
	void *value;
	int ret;

	if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
		return -EPERM;

	value_len = __bpf_dynptr_size(value_ptr);
	value = __bpf_dynptr_data_rw(value_ptr, value_len);
	if (!value)
		return -EINVAL;

	dentry = file_dentry(file);
	ret = inode_permission(&nop_mnt_idmap, dentry->d_inode, MAY_READ);
	if (ret)
		return ret;
	return __vfs_getxattr(dentry, dentry->d_inode, name__str, value, value_len);
}
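
/*
 * A minimal BPF LSM usage sketch (not part of this file; assumes libbpf's
 * bpf_helpers.h and bpf_tracing.h plus a kfunc extern; the xattr name is
 * only an example):
 *
 *	extern int bpf_get_file_xattr(struct file *file, const char *name,
 *				      struct bpf_dynptr *value_ptr) __ksym;
 *
 *	SEC("lsm.s/file_open")
 *	int BPF_PROG(check_xattr, struct file *file)
 *	{
 *		struct bpf_dynptr value;
 *		char buf[64];
 *
 *		bpf_dynptr_from_mem(buf, sizeof(buf), 0, &value);
 *		if (!bpf_get_file_xattr(file, "user.label", &value)) {
 *			// inspect buf here
 *		}
 *		return 0;
 *	}
 */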

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(fs_kfunc_set_ids)
BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
BTF_KFUNCS_END(fs_kfunc_set_ids)

static int bpf_get_file_xattr_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
	if (!btf_id_set8_contains(&fs_kfunc_set_ids, kfunc_id))
		return 0;

	/* Only allow to attach from LSM hooks, to avoid recursion */
	return prog->type != BPF_PROG_TYPE_LSM ? -EACCES : 0;
}

static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &fs_kfunc_set_ids,
	.filter = bpf_get_file_xattr_filter,
};

static int __init bpf_fs_kfuncs_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
}

late_initcall(bpf_fs_kfuncs_init);

static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_map_lookup_percpu_elem:
		return &bpf_map_lookup_percpu_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_task_btf:
		return &bpf_get_current_task_btf_proto;
	case BPF_FUNC_task_pt_regs:
		return &bpf_task_pt_regs_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_write_user:
		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
		       NULL : bpf_get_probe_write_proto();
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_cgrp_storage_get:
		return &bpf_cgrp_storage_get_proto;
	case BPF_FUNC_cgrp_storage_delete:
		return &bpf_cgrp_storage_delete_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_get_task_stack:
		return &bpf_get_task_stack_proto;
	case BPF_FUNC_copy_from_user:
		return &bpf_copy_from_user_proto;
	case BPF_FUNC_copy_from_user_task:
		return &bpf_copy_from_user_task_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_task_storage_get:
		if (bpf_prog_check_recur(prog))
			return &bpf_task_storage_get_recur_proto;
		return &bpf_task_storage_get_proto;
	case BPF_FUNC_task_storage_delete:
		if (bpf_prog_check_recur(prog))
			return &bpf_task_storage_delete_recur_proto;
		return &bpf_task_storage_delete_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	case BPF_FUNC_get_func_ip:
		return &bpf_get_func_ip_proto_tracing;
	case BPF_FUNC_get_branch_snapshot:
		return &bpf_get_branch_snapshot_proto;
	case BPF_FUNC_find_vma:
		return &bpf_find_vma_proto;
	case BPF_FUNC_trace_vprintk:
		return bpf_get_trace_vprintk_proto();
	default:
		return bpf_base_func_proto(func_id, prog);
	}
}
1635 | |
1636 | static const struct bpf_func_proto * |
1637 | kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
1638 | { |
1639 | switch (func_id) { |
1640 | case BPF_FUNC_perf_event_output: |
1641 | return &bpf_perf_event_output_proto; |
1642 | case BPF_FUNC_get_stackid: |
1643 | return &bpf_get_stackid_proto; |
1644 | case BPF_FUNC_get_stack: |
1645 | return &bpf_get_stack_proto; |
1646 | #ifdef CONFIG_BPF_KPROBE_OVERRIDE |
1647 | case BPF_FUNC_override_return: |
1648 | return &bpf_override_return_proto; |
1649 | #endif |
1650 | case BPF_FUNC_get_func_ip: |
1651 | if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI) |
1652 | return &bpf_get_func_ip_proto_kprobe_multi; |
1653 | if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI) |
1654 | return &bpf_get_func_ip_proto_uprobe_multi; |
1655 | return &bpf_get_func_ip_proto_kprobe; |
1656 | case BPF_FUNC_get_attach_cookie: |
1657 | if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI) |
1658 | return &bpf_get_attach_cookie_proto_kmulti; |
1659 | if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI) |
1660 | return &bpf_get_attach_cookie_proto_umulti; |
1661 | return &bpf_get_attach_cookie_proto_trace; |
1662 | default: |
1663 | return bpf_tracing_func_proto(func_id, prog); |
1664 | } |
1665 | } |
1666 | |
1667 | /* bpf+kprobe programs can access fields of 'struct pt_regs' */ |
1668 | static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, |
1669 | const struct bpf_prog *prog, |
1670 | struct bpf_insn_access_aux *info) |
1671 | { |
1672 | if (off < 0 || off >= sizeof(struct pt_regs)) |
1673 | return false; |
1674 | if (type != BPF_READ) |
1675 | return false; |
1676 | if (off % size != 0) |
1677 | return false; |
1678 | /* |
1679 | * Assertion for 32 bit to make sure last 8 byte access |
1680 | * (BPF_DW) to the last 4 byte member is disallowed. |
1681 | */ |
1682 | if (off + size > sizeof(struct pt_regs)) |
1683 | return false; |
1684 | |
1685 | return true; |
1686 | } |
1687 | |
1688 | const struct bpf_verifier_ops kprobe_verifier_ops = { |
1689 | .get_func_proto = kprobe_prog_func_proto, |
1690 | .is_valid_access = kprobe_prog_is_valid_access, |
1691 | }; |
1692 | |
1693 | const struct bpf_prog_ops kprobe_prog_ops = { |
1694 | }; |
1695 | |
1696 | BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map, |
1697 | u64, flags, void *, data, u64, size) |
1698 | { |
1699 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; |
1700 | |
1701 | /* |
1702 | * r1 points to perf tracepoint buffer where first 8 bytes are hidden |
1703 | * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it |
1704 | * from there and call the same bpf_perf_event_output() helper inline. |
1705 | */ |
1706 | return ____bpf_perf_event_output(regs, map, flags, data, size); |
1707 | } |
1708 | |
1709 | static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { |
1710 | .func = bpf_perf_event_output_tp, |
1711 | .gpl_only = true, |
1712 | .ret_type = RET_INTEGER, |
1713 | .arg1_type = ARG_PTR_TO_CTX, |
1714 | .arg2_type = ARG_CONST_MAP_PTR, |
1715 | .arg3_type = ARG_ANYTHING, |
1716 | .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
1717 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
1718 | }; |
1719 | |
1720 | BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map, |
1721 | u64, flags) |
1722 | { |
1723 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; |
1724 | |
1725 | /* |
1726 | * Same comment as in bpf_perf_event_output_tp(), only that this time |
1727 | * the other helper's function body cannot be inlined due to being |
1728 | * external, thus we need to call raw helper function. |
1729 | */ |
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
1732 | } |
1733 | |
1734 | static const struct bpf_func_proto bpf_get_stackid_proto_tp = { |
1735 | .func = bpf_get_stackid_tp, |
1736 | .gpl_only = true, |
1737 | .ret_type = RET_INTEGER, |
1738 | .arg1_type = ARG_PTR_TO_CTX, |
1739 | .arg2_type = ARG_CONST_MAP_PTR, |
1740 | .arg3_type = ARG_ANYTHING, |
1741 | }; |
1742 | |
1743 | BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size, |
1744 | u64, flags) |
1745 | { |
1746 | struct pt_regs *regs = *(struct pt_regs **)tp_buff; |
1747 | |
	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
1750 | } |
1751 | |
1752 | static const struct bpf_func_proto bpf_get_stack_proto_tp = { |
1753 | .func = bpf_get_stack_tp, |
1754 | .gpl_only = true, |
1755 | .ret_type = RET_INTEGER, |
1756 | .arg1_type = ARG_PTR_TO_CTX, |
1757 | .arg2_type = ARG_PTR_TO_UNINIT_MEM, |
1758 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, |
1759 | .arg4_type = ARG_ANYTHING, |
1760 | }; |
1761 | |
1762 | static const struct bpf_func_proto * |
1763 | tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
1764 | { |
1765 | switch (func_id) { |
1766 | case BPF_FUNC_perf_event_output: |
1767 | return &bpf_perf_event_output_proto_tp; |
1768 | case BPF_FUNC_get_stackid: |
1769 | return &bpf_get_stackid_proto_tp; |
1770 | case BPF_FUNC_get_stack: |
1771 | return &bpf_get_stack_proto_tp; |
1772 | case BPF_FUNC_get_attach_cookie: |
1773 | return &bpf_get_attach_cookie_proto_trace; |
1774 | default: |
1775 | return bpf_tracing_func_proto(func_id, prog); |
1776 | } |
1777 | } |
1778 | |
1779 | static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, |
1780 | const struct bpf_prog *prog, |
1781 | struct bpf_insn_access_aux *info) |
1782 | { |
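	/*
	 * The first sizeof(void *) bytes of the perf tracepoint buffer hold
	 * the hidden 'struct pt_regs' pointer (see bpf_perf_event_output_tp()),
	 * so they must stay invisible to tracepoint programs.
	 */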
1783 | if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) |
1784 | return false; |
1785 | if (type != BPF_READ) |
1786 | return false; |
1787 | if (off % size != 0) |
1788 | return false; |
1789 | |
1790 | BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64)); |
1791 | return true; |
1792 | } |
1793 | |
1794 | const struct bpf_verifier_ops tracepoint_verifier_ops = { |
1795 | .get_func_proto = tp_prog_func_proto, |
1796 | .is_valid_access = tp_prog_is_valid_access, |
1797 | }; |
1798 | |
1799 | const struct bpf_prog_ops tracepoint_prog_ops = { |
1800 | }; |
1801 | |
1802 | BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx, |
1803 | struct bpf_perf_event_value *, buf, u32, size) |
1804 | { |
1805 | int err = -EINVAL; |
1806 | |
1807 | if (unlikely(size != sizeof(struct bpf_perf_event_value))) |
1808 | goto clear; |
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
1811 | if (unlikely(err)) |
1812 | goto clear; |
1813 | return 0; |
1814 | clear: |
1815 | memset(buf, 0, size); |
1816 | return err; |
1817 | } |
1818 | |
1819 | static const struct bpf_func_proto bpf_perf_prog_read_value_proto = { |
1820 | .func = bpf_perf_prog_read_value, |
1821 | .gpl_only = true, |
1822 | .ret_type = RET_INTEGER, |
1823 | .arg1_type = ARG_PTR_TO_CTX, |
1824 | .arg2_type = ARG_PTR_TO_UNINIT_MEM, |
1825 | .arg3_type = ARG_CONST_SIZE, |
1826 | }; |
1827 | |
1828 | BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, |
1829 | void *, buf, u32, size, u64, flags) |
1830 | { |
1831 | static const u32 br_entry_size = sizeof(struct perf_branch_entry); |
1832 | struct perf_branch_stack *br_stack = ctx->data->br_stack; |
1833 | u32 to_copy; |
1834 | |
1835 | if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE)) |
1836 | return -EINVAL; |
1837 | |
1838 | if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK))) |
1839 | return -ENOENT; |
1840 | |
1841 | if (unlikely(!br_stack)) |
1842 | return -ENOENT; |
1843 | |
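	/*
	 * With BPF_F_GET_BRANCH_RECORDS_SIZE set, the helper only reports how
	 * many bytes a full copy would take, without writing to the buffer.
	 */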
1844 | if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE) |
1845 | return br_stack->nr * br_entry_size; |
1846 | |
1847 | if (!buf || (size % br_entry_size != 0)) |
1848 | return -EINVAL; |
1849 | |
1850 | to_copy = min_t(u32, br_stack->nr * br_entry_size, size); |
1851 | memcpy(buf, br_stack->entries, to_copy); |
1852 | |
1853 | return to_copy; |
1854 | } |
1855 | |
1856 | static const struct bpf_func_proto bpf_read_branch_records_proto = { |
1857 | .func = bpf_read_branch_records, |
1858 | .gpl_only = true, |
1859 | .ret_type = RET_INTEGER, |
1860 | .arg1_type = ARG_PTR_TO_CTX, |
1861 | .arg2_type = ARG_PTR_TO_MEM_OR_NULL, |
1862 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, |
1863 | .arg4_type = ARG_ANYTHING, |
1864 | }; |
1865 | |
1866 | static const struct bpf_func_proto * |
1867 | pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
1868 | { |
1869 | switch (func_id) { |
1870 | case BPF_FUNC_perf_event_output: |
1871 | return &bpf_perf_event_output_proto_tp; |
1872 | case BPF_FUNC_get_stackid: |
1873 | return &bpf_get_stackid_proto_pe; |
1874 | case BPF_FUNC_get_stack: |
1875 | return &bpf_get_stack_proto_pe; |
1876 | case BPF_FUNC_perf_prog_read_value: |
1877 | return &bpf_perf_prog_read_value_proto; |
1878 | case BPF_FUNC_read_branch_records: |
1879 | return &bpf_read_branch_records_proto; |
1880 | case BPF_FUNC_get_attach_cookie: |
1881 | return &bpf_get_attach_cookie_proto_pe; |
1882 | default: |
1883 | return bpf_tracing_func_proto(func_id, prog); |
1884 | } |
1885 | } |
1886 | |
1887 | /* |
1888 | * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp |
1889 | * to avoid potential recursive reuse issue when/if tracepoints are added |
1890 | * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack. |
1891 | * |
1892 | * Since raw tracepoints run despite bpf_prog_active, support concurrent usage |
1893 | * in normal, irq, and nmi context. |
1894 | */ |
1895 | struct bpf_raw_tp_regs { |
1896 | struct pt_regs regs[3]; |
1897 | }; |
1898 | static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs); |
1899 | static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level); |
1900 | static struct pt_regs *get_bpf_raw_tp_regs(void) |
1901 | { |
1902 | struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs); |
1903 | int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level); |
1904 | |
1905 | if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) { |
1906 | this_cpu_dec(bpf_raw_tp_nest_level); |
		return ERR_PTR(-EBUSY);
1908 | } |
1909 | |
1910 | return &tp_regs->regs[nest_level - 1]; |
1911 | } |
1912 | |
1913 | static void put_bpf_raw_tp_regs(void) |
1914 | { |
1915 | this_cpu_dec(bpf_raw_tp_nest_level); |
1916 | } |
1917 | |
1918 | BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args, |
1919 | struct bpf_map *, map, u64, flags, void *, data, u64, size) |
1920 | { |
1921 | struct pt_regs *regs = get_bpf_raw_tp_regs(); |
1922 | int ret; |
1923 | |
	if (IS_ERR(regs))
		return PTR_ERR(regs);
1926 | |
1927 | perf_fetch_caller_regs(regs); |
1928 | ret = ____bpf_perf_event_output(regs, map, flags, data, size); |
1929 | |
1930 | put_bpf_raw_tp_regs(); |
1931 | return ret; |
1932 | } |
1933 | |
1934 | static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { |
1935 | .func = bpf_perf_event_output_raw_tp, |
1936 | .gpl_only = true, |
1937 | .ret_type = RET_INTEGER, |
1938 | .arg1_type = ARG_PTR_TO_CTX, |
1939 | .arg2_type = ARG_CONST_MAP_PTR, |
1940 | .arg3_type = ARG_ANYTHING, |
1941 | .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
1942 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
1943 | }; |
1944 | |
1945 | extern const struct bpf_func_proto bpf_skb_output_proto; |
1946 | extern const struct bpf_func_proto bpf_xdp_output_proto; |
1947 | extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto; |
1948 | |
1949 | BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args, |
1950 | struct bpf_map *, map, u64, flags) |
1951 | { |
1952 | struct pt_regs *regs = get_bpf_raw_tp_regs(); |
1953 | int ret; |
1954 | |
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
1962 | put_bpf_raw_tp_regs(); |
1963 | return ret; |
1964 | } |
1965 | |
1966 | static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { |
1967 | .func = bpf_get_stackid_raw_tp, |
1968 | .gpl_only = true, |
1969 | .ret_type = RET_INTEGER, |
1970 | .arg1_type = ARG_PTR_TO_CTX, |
1971 | .arg2_type = ARG_CONST_MAP_PTR, |
1972 | .arg3_type = ARG_ANYTHING, |
1973 | }; |
1974 | |
1975 | BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args, |
1976 | void *, buf, u32, size, u64, flags) |
1977 | { |
1978 | struct pt_regs *regs = get_bpf_raw_tp_regs(); |
1979 | int ret; |
1980 | |
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
1987 | put_bpf_raw_tp_regs(); |
1988 | return ret; |
1989 | } |
1990 | |
1991 | static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = { |
1992 | .func = bpf_get_stack_raw_tp, |
1993 | .gpl_only = true, |
1994 | .ret_type = RET_INTEGER, |
1995 | .arg1_type = ARG_PTR_TO_CTX, |
1996 | .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, |
1997 | .arg3_type = ARG_CONST_SIZE_OR_ZERO, |
1998 | .arg4_type = ARG_ANYTHING, |
1999 | }; |
2000 | |
2001 | static const struct bpf_func_proto * |
2002 | raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
2003 | { |
2004 | switch (func_id) { |
2005 | case BPF_FUNC_perf_event_output: |
2006 | return &bpf_perf_event_output_proto_raw_tp; |
2007 | case BPF_FUNC_get_stackid: |
2008 | return &bpf_get_stackid_proto_raw_tp; |
2009 | case BPF_FUNC_get_stack: |
2010 | return &bpf_get_stack_proto_raw_tp; |
2011 | default: |
2012 | return bpf_tracing_func_proto(func_id, prog); |
2013 | } |
2014 | } |
2015 | |
2016 | const struct bpf_func_proto * |
2017 | tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
2018 | { |
2019 | const struct bpf_func_proto *fn; |
2020 | |
2021 | switch (func_id) { |
2022 | #ifdef CONFIG_NET |
2023 | case BPF_FUNC_skb_output: |
2024 | return &bpf_skb_output_proto; |
2025 | case BPF_FUNC_xdp_output: |
2026 | return &bpf_xdp_output_proto; |
2027 | case BPF_FUNC_skc_to_tcp6_sock: |
2028 | return &bpf_skc_to_tcp6_sock_proto; |
2029 | case BPF_FUNC_skc_to_tcp_sock: |
2030 | return &bpf_skc_to_tcp_sock_proto; |
2031 | case BPF_FUNC_skc_to_tcp_timewait_sock: |
2032 | return &bpf_skc_to_tcp_timewait_sock_proto; |
2033 | case BPF_FUNC_skc_to_tcp_request_sock: |
2034 | return &bpf_skc_to_tcp_request_sock_proto; |
2035 | case BPF_FUNC_skc_to_udp6_sock: |
2036 | return &bpf_skc_to_udp6_sock_proto; |
2037 | case BPF_FUNC_skc_to_unix_sock: |
2038 | return &bpf_skc_to_unix_sock_proto; |
2039 | case BPF_FUNC_skc_to_mptcp_sock: |
2040 | return &bpf_skc_to_mptcp_sock_proto; |
2041 | case BPF_FUNC_sk_storage_get: |
2042 | return &bpf_sk_storage_get_tracing_proto; |
2043 | case BPF_FUNC_sk_storage_delete: |
2044 | return &bpf_sk_storage_delete_tracing_proto; |
2045 | case BPF_FUNC_sock_from_file: |
2046 | return &bpf_sock_from_file_proto; |
2047 | case BPF_FUNC_get_socket_cookie: |
2048 | return &bpf_get_socket_ptr_cookie_proto; |
2049 | case BPF_FUNC_xdp_get_buff_len: |
2050 | return &bpf_xdp_get_buff_len_trace_proto; |
2051 | #endif |
2052 | case BPF_FUNC_seq_printf: |
2053 | return prog->expected_attach_type == BPF_TRACE_ITER ? |
2054 | &bpf_seq_printf_proto : |
2055 | NULL; |
2056 | case BPF_FUNC_seq_write: |
2057 | return prog->expected_attach_type == BPF_TRACE_ITER ? |
2058 | &bpf_seq_write_proto : |
2059 | NULL; |
2060 | case BPF_FUNC_seq_printf_btf: |
2061 | return prog->expected_attach_type == BPF_TRACE_ITER ? |
2062 | &bpf_seq_printf_btf_proto : |
2063 | NULL; |
2064 | case BPF_FUNC_d_path: |
2065 | return &bpf_d_path_proto; |
2066 | case BPF_FUNC_get_func_arg: |
2067 | return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL; |
2068 | case BPF_FUNC_get_func_ret: |
2069 | return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL; |
2070 | case BPF_FUNC_get_func_arg_cnt: |
2071 | return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL; |
2072 | case BPF_FUNC_get_attach_cookie: |
2073 | return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL; |
2074 | default: |
2075 | fn = raw_tp_prog_func_proto(func_id, prog); |
2076 | if (!fn && prog->expected_attach_type == BPF_TRACE_ITER) |
2077 | fn = bpf_iter_get_func_proto(func_id, prog); |
2078 | return fn; |
2079 | } |
2080 | } |
2081 | |
2082 | static bool raw_tp_prog_is_valid_access(int off, int size, |
2083 | enum bpf_access_type type, |
2084 | const struct bpf_prog *prog, |
2085 | struct bpf_insn_access_aux *info) |
2086 | { |
2087 | return bpf_tracing_ctx_access(off, size, type); |
2088 | } |
2089 | |
2090 | static bool tracing_prog_is_valid_access(int off, int size, |
2091 | enum bpf_access_type type, |
2092 | const struct bpf_prog *prog, |
2093 | struct bpf_insn_access_aux *info) |
2094 | { |
2095 | return bpf_tracing_btf_ctx_access(off, size, type, prog, info); |
2096 | } |
2097 | |
2098 | int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog, |
2099 | const union bpf_attr *kattr, |
2100 | union bpf_attr __user *uattr) |
2101 | { |
2102 | return -ENOTSUPP; |
2103 | } |
2104 | |
2105 | const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { |
2106 | .get_func_proto = raw_tp_prog_func_proto, |
2107 | .is_valid_access = raw_tp_prog_is_valid_access, |
2108 | }; |
2109 | |
2110 | const struct bpf_prog_ops raw_tracepoint_prog_ops = { |
2111 | #ifdef CONFIG_NET |
2112 | .test_run = bpf_prog_test_run_raw_tp, |
2113 | #endif |
2114 | }; |
2115 | |
2116 | const struct bpf_verifier_ops tracing_verifier_ops = { |
2117 | .get_func_proto = tracing_prog_func_proto, |
2118 | .is_valid_access = tracing_prog_is_valid_access, |
2119 | }; |
2120 | |
2121 | const struct bpf_prog_ops tracing_prog_ops = { |
2122 | .test_run = bpf_prog_test_run_tracing, |
2123 | }; |
2124 | |
2125 | static bool raw_tp_writable_prog_is_valid_access(int off, int size, |
2126 | enum bpf_access_type type, |
2127 | const struct bpf_prog *prog, |
2128 | struct bpf_insn_access_aux *info) |
2129 | { |
2130 | if (off == 0) { |
2131 | if (size != sizeof(u64) || type != BPF_READ) |
2132 | return false; |
2133 | info->reg_type = PTR_TO_TP_BUFFER; |
2134 | } |
2135 | return raw_tp_prog_is_valid_access(off, size, type, prog, info); |
2136 | } |
2137 | |
2138 | const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = { |
2139 | .get_func_proto = raw_tp_prog_func_proto, |
2140 | .is_valid_access = raw_tp_writable_prog_is_valid_access, |
2141 | }; |
2142 | |
2143 | const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = { |
2144 | }; |
2145 | |
2146 | static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, |
2147 | const struct bpf_prog *prog, |
2148 | struct bpf_insn_access_aux *info) |
2149 | { |
2150 | const int size_u64 = sizeof(u64); |
2151 | |
2152 | if (off < 0 || off >= sizeof(struct bpf_perf_event_data)) |
2153 | return false; |
2154 | if (type != BPF_READ) |
2155 | return false; |
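	/*
	 * Misaligned access is tolerated in exactly one case: an 8-byte
	 * (BPF_DW) read of a 4-byte-aligned 'long' member on a 32-bit
	 * kernel.
	 */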
2156 | if (off % size != 0) { |
2157 | if (sizeof(unsigned long) != 4) |
2158 | return false; |
2159 | if (size != 8) |
2160 | return false; |
2161 | if (off % size != 4) |
2162 | return false; |
2163 | } |
2164 | |
2165 | switch (off) { |
2166 | case bpf_ctx_range(struct bpf_perf_event_data, sample_period): |
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2174 | return false; |
2175 | break; |
2176 | default: |
2177 | if (size != sizeof(long)) |
2178 | return false; |
2179 | } |
2180 | |
2181 | return true; |
2182 | } |
2183 | |
2184 | static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, |
2185 | const struct bpf_insn *si, |
2186 | struct bpf_insn *insn_buf, |
2187 | struct bpf_prog *prog, u32 *target_size) |
2188 | { |
2189 | struct bpf_insn *insn = insn_buf; |
2190 | |
2191 | switch (si->off) { |
2192 | case offsetof(struct bpf_perf_event_data, sample_period): |
2193 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, |
2194 | data), si->dst_reg, si->src_reg, |
2195 | offsetof(struct bpf_perf_event_data_kern, data)); |
2196 | *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, |
2197 | bpf_target_off(struct perf_sample_data, period, 8, |
2198 | target_size)); |
2199 | break; |
2200 | case offsetof(struct bpf_perf_event_data, addr): |
2201 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, |
2202 | data), si->dst_reg, si->src_reg, |
2203 | offsetof(struct bpf_perf_event_data_kern, data)); |
2204 | *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, |
2205 | bpf_target_off(struct perf_sample_data, addr, 8, |
2206 | target_size)); |
2207 | break; |
2208 | default: |
2209 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, |
2210 | regs), si->dst_reg, si->src_reg, |
2211 | offsetof(struct bpf_perf_event_data_kern, regs)); |
2212 | *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg, |
2213 | si->off); |
2214 | break; |
2215 | } |
2216 | |
2217 | return insn - insn_buf; |
2218 | } |
2219 | |
2220 | const struct bpf_verifier_ops perf_event_verifier_ops = { |
2221 | .get_func_proto = pe_prog_func_proto, |
2222 | .is_valid_access = pe_prog_is_valid_access, |
2223 | .convert_ctx_access = pe_prog_convert_ctx_access, |
2224 | }; |
2225 | |
2226 | const struct bpf_prog_ops perf_event_prog_ops = { |
2227 | }; |
2228 | |
2229 | static DEFINE_MUTEX(bpf_event_mutex); |
2230 | |
2231 | #define BPF_TRACE_MAX_PROGS 64 |
2232 | |
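/*
 * Attaching is copy-on-write on the shared tp_event->prog_array: a new
 * array with the program (and its cookie) appended is published via
 * rcu_assign_pointer() and the old array is only freed after a grace
 * period, so concurrent readers never observe a half-updated list.
 */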
2233 | int perf_event_attach_bpf_prog(struct perf_event *event, |
2234 | struct bpf_prog *prog, |
2235 | u64 bpf_cookie) |
2236 | { |
2237 | struct bpf_prog_array *old_array; |
2238 | struct bpf_prog_array *new_array; |
2239 | int ret = -EEXIST; |
2240 | |
2241 | /* |
2242 | * Kprobe override only works if they are on the function entry, |
2243 | * and only if they are on the opt-in list. |
2244 | */ |
2245 | if (prog->kprobe_override && |
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
2248 | return -EINVAL; |
2249 | |
2250 | mutex_lock(&bpf_event_mutex); |
2251 | |
2252 | if (event->prog) |
2253 | goto unlock; |
2254 | |
2255 | old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); |
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	event->bpf_cookie = bpf_cookie;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free_sleepable(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
2274 | return ret; |
2275 | } |
2276 | |
2277 | void perf_event_detach_bpf_prog(struct perf_event *event) |
2278 | { |
2279 | struct bpf_prog_array *old_array; |
2280 | struct bpf_prog_array *new_array; |
2281 | int ret; |
2282 | |
2283 | mutex_lock(&bpf_event_mutex); |
2284 | |
2285 | if (!event->prog) |
2286 | goto unlock; |
2287 | |
2288 | old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); |
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free_sleepable(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
2304 | } |
2305 | |
2306 | int perf_event_query_prog_array(struct perf_event *event, void __user *info) |
2307 | { |
2308 | struct perf_event_query_bpf __user *uquery = info; |
2309 | struct perf_event_query_bpf query = {}; |
2310 | struct bpf_prog_array *progs; |
2311 | u32 *ids, prog_cnt, ids_len; |
2312 | int ret; |
2313 | |
2314 | if (!perfmon_capable()) |
2315 | return -EPERM; |
2316 | if (event->attr.type != PERF_TYPE_TRACEPOINT) |
2317 | return -EINVAL; |
	if (copy_from_user(&query, uquery, sizeof(query)))
2319 | return -EFAULT; |
2320 | |
2321 | ids_len = query.ids_len; |
2322 | if (ids_len > BPF_TRACE_MAX_PROGS) |
2323 | return -E2BIG; |
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
2325 | if (!ids) |
2326 | return -ENOMEM; |
2327 | /* |
2328 | * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which |
2329 | * is required when user only wants to check for uquery->prog_cnt. |
2330 | * There is no need to check for it since the case is handled |
2331 | * gracefully in bpf_prog_array_copy_info. |
2332 | */ |
2333 | |
2334 | mutex_lock(&bpf_event_mutex); |
2335 | progs = bpf_event_rcu_dereference(event->tp_event->prog_array); |
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
2344 | return ret; |
2345 | } |
2346 | |
2347 | extern struct bpf_raw_event_map __start__bpf_raw_tp[]; |
2348 | extern struct bpf_raw_event_map __stop__bpf_raw_tp[]; |
2349 | |
2350 | struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) |
2351 | { |
2352 | struct bpf_raw_event_map *btp = __start__bpf_raw_tp; |
2353 | |
2354 | for (; btp < __stop__bpf_raw_tp; btp++) { |
2355 | if (!strcmp(btp->tp->name, name)) |
2356 | return btp; |
2357 | } |
2358 | |
2359 | return bpf_get_raw_tracepoint_module(name); |
2360 | } |
2361 | |
2362 | void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) |
2363 | { |
2364 | struct module *mod; |
2365 | |
2366 | preempt_disable(); |
	mod = __module_address((unsigned long)btp);
	module_put(mod);
2369 | preempt_enable(); |
2370 | } |
2371 | |
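/*
 * The per-CPU prog->active counter acts as a recursion guard: if the same
 * program is already running on this CPU (e.g. a tracepoint fired
 * recursively from within a helper), the nested invocation is skipped and
 * accounted as a miss instead.
 */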
2372 | static __always_inline |
2373 | void __bpf_trace_run(struct bpf_prog *prog, u64 *args) |
2374 | { |
2375 | cant_sleep(); |
2376 | if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) { |
2377 | bpf_prog_inc_misses_counter(prog); |
2378 | goto out; |
2379 | } |
2380 | rcu_read_lock(); |
	(void) bpf_prog_run(prog, args);
2382 | rcu_read_unlock(); |
2383 | out: |
2384 | this_cpu_dec(*(prog->active)); |
2385 | } |
2386 | |
2387 | #define UNPACK(...) __VA_ARGS__ |
2388 | #define REPEAT_1(FN, DL, X, ...) FN(X) |
2389 | #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__) |
2390 | #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__) |
2391 | #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__) |
2392 | #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__) |
2393 | #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__) |
2394 | #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__) |
2395 | #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__) |
2396 | #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__) |
2397 | #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__) |
2398 | #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__) |
2399 | #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__) |
2400 | #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__) |
2401 | |
2402 | #define SARG(X) u64 arg##X |
2403 | #define COPY(X) args[X] = arg##X |
2404 | |
2405 | #define __DL_COM (,) |
2406 | #define __DL_SEM (;) |
2407 | |
2408 | #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 |
2409 | |
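/*
 * BPF_TRACE_DEFN_x(x) expands to bpf_trace_run<x>(), which packs its x
 * u64 arguments into a stack array and hands that array to
 * __bpf_trace_run(). For example, BPF_TRACE_DEFN_x(2) generates:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 */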
2410 | #define BPF_TRACE_DEFN_x(x) \ |
2411 | void bpf_trace_run##x(struct bpf_prog *prog, \ |
2412 | REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \ |
2413 | { \ |
2414 | u64 args[x]; \ |
2415 | REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \ |
2416 | __bpf_trace_run(prog, args); \ |
2417 | } \ |
2418 | EXPORT_SYMBOL_GPL(bpf_trace_run##x) |
2419 | BPF_TRACE_DEFN_x(1); |
2420 | BPF_TRACE_DEFN_x(2); |
2421 | BPF_TRACE_DEFN_x(3); |
2422 | BPF_TRACE_DEFN_x(4); |
2423 | BPF_TRACE_DEFN_x(5); |
2424 | BPF_TRACE_DEFN_x(6); |
2425 | BPF_TRACE_DEFN_x(7); |
2426 | BPF_TRACE_DEFN_x(8); |
2427 | BPF_TRACE_DEFN_x(9); |
2428 | BPF_TRACE_DEFN_x(10); |
2429 | BPF_TRACE_DEFN_x(11); |
2430 | BPF_TRACE_DEFN_x(12); |
2431 | |
2432 | static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) |
2433 | { |
2434 | struct tracepoint *tp = btp->tp; |
2435 | |
2436 | /* |
2437 | * check that program doesn't access arguments beyond what's |
2438 | * available in this tracepoint |
2439 | */ |
2440 | if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64)) |
2441 | return -EINVAL; |
2442 | |
2443 | if (prog->aux->max_tp_access > btp->writable_size) |
2444 | return -EINVAL; |
2445 | |
	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
						   prog);
2448 | } |
2449 | |
2450 | int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) |
2451 | { |
2452 | return __bpf_probe_register(btp, prog); |
2453 | } |
2454 | |
2455 | int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) |
2456 | { |
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
2458 | } |
2459 | |
2460 | int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, |
2461 | u32 *fd_type, const char **buf, |
2462 | u64 *probe_offset, u64 *probe_addr, |
2463 | unsigned long *missed) |
2464 | { |
2465 | bool is_tracepoint, is_syscall_tp; |
2466 | struct bpf_prog *prog; |
2467 | int flags, err = 0; |
2468 | |
2469 | prog = event->prog; |
2470 | if (!prog) |
2471 | return -ENOENT; |
2472 | |
2473 | /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */ |
2474 | if (prog->type == BPF_PROG_TYPE_PERF_EVENT) |
2475 | return -EOPNOTSUPP; |
2476 | |
2477 | *prog_id = prog->aux->id; |
2478 | flags = event->tp_event->flags; |
2479 | is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT; |
	is_syscall_tp = is_syscall_trace_event(event->tp_event);
2481 | |
2482 | if (is_tracepoint || is_syscall_tp) { |
2483 | *buf = is_tracepoint ? event->tp_event->tp->name |
2484 | : event->tp_event->name; |
2485 | /* We allow NULL pointer for tracepoint */ |
2486 | if (fd_type) |
2487 | *fd_type = BPF_FD_TYPE_TRACEPOINT; |
2488 | if (probe_offset) |
2489 | *probe_offset = 0x0; |
2490 | if (probe_addr) |
2491 | *probe_addr = 0x0; |
2492 | } else { |
2493 | /* kprobe/uprobe */ |
2494 | err = -EOPNOTSUPP; |
2495 | #ifdef CONFIG_KPROBE_EVENTS |
2496 | if (flags & TRACE_EVENT_FL_KPROBE) |
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr, missed,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
2506 | #endif |
2507 | } |
2508 | |
2509 | return err; |
2510 | } |
2511 | |
2512 | static int __init send_signal_irq_work_init(void) |
2513 | { |
2514 | int cpu; |
2515 | struct send_signal_irq_work *work; |
2516 | |
2517 | for_each_possible_cpu(cpu) { |
2518 | work = per_cpu_ptr(&send_signal_work, cpu); |
		init_irq_work(&work->irq_work, do_bpf_send_signal);
2520 | } |
2521 | return 0; |
2522 | } |
2523 | |
2524 | subsys_initcall(send_signal_irq_work_init); |
2525 | |
2526 | #ifdef CONFIG_MODULES |
2527 | static int bpf_event_notify(struct notifier_block *nb, unsigned long op, |
2528 | void *module) |
2529 | { |
2530 | struct bpf_trace_module *btm, *tmp; |
2531 | struct module *mod = module; |
2532 | int ret = 0; |
2533 | |
2534 | if (mod->num_bpf_raw_events == 0 || |
2535 | (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING)) |
2536 | goto out; |
2537 | |
2538 | mutex_lock(&bpf_module_mutex); |
2539 | |
2540 | switch (op) { |
2541 | case MODULE_STATE_COMING: |
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
2546 | } else { |
2547 | ret = -ENOMEM; |
2548 | } |
2549 | break; |
2550 | case MODULE_STATE_GOING: |
2551 | list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) { |
2552 | if (btm->module == module) { |
				list_del(&btm->list);
				kfree(btm);
2555 | break; |
2556 | } |
2557 | } |
2558 | break; |
2559 | } |
2560 | |
	mutex_unlock(&bpf_module_mutex);

out:
	return notifier_from_errno(ret);
2565 | } |
2566 | |
2567 | static struct notifier_block bpf_module_nb = { |
2568 | .notifier_call = bpf_event_notify, |
2569 | }; |
2570 | |
2571 | static int __init bpf_event_init(void) |
2572 | { |
	register_module_notifier(&bpf_module_nb);
2574 | return 0; |
2575 | } |
2576 | |
2577 | fs_initcall(bpf_event_init); |
2578 | #endif /* CONFIG_MODULES */ |
2579 | |
2580 | #ifdef CONFIG_FPROBE |
2581 | struct bpf_kprobe_multi_link { |
2582 | struct bpf_link link; |
2583 | struct fprobe fp; |
2584 | unsigned long *addrs; |
2585 | u64 *cookies; |
2586 | u32 cnt; |
2587 | u32 mods_cnt; |
2588 | struct module **mods; |
2589 | u32 flags; |
2590 | }; |
2591 | |
2592 | struct bpf_kprobe_multi_run_ctx { |
2593 | struct bpf_run_ctx run_ctx; |
2594 | struct bpf_kprobe_multi_link *link; |
2595 | unsigned long entry_ip; |
2596 | }; |
2597 | |
2598 | struct user_syms { |
2599 | const char **syms; |
2600 | char *buf; |
2601 | }; |
2602 | |
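/*
 * Copy an array of user-space symbol name pointers into two kernel
 * buffers: 'buf' holds the NUL-terminated names packed back to back
 * (each at most KSYM_NAME_LEN bytes), while 'syms' holds one pointer
 * into 'buf' per name.
 */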
2603 | static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt) |
2604 | { |
2605 | unsigned long __user usymbol; |
2606 | const char **syms = NULL; |
2607 | char *buf = NULL, *p; |
2608 | int err = -ENOMEM; |
2609 | unsigned int i; |
2610 | |
	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
	if (!syms)
		goto error;

	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
	if (!buf)
		goto error;

	for (p = buf, i = 0; i < cnt; i++) {
		if (__get_user(usymbol, usyms + i)) {
			err = -EFAULT;
			goto error;
		}
		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
2625 | if (err == KSYM_NAME_LEN) |
2626 | err = -E2BIG; |
2627 | if (err < 0) |
2628 | goto error; |
2629 | syms[i] = p; |
2630 | p += err + 1; |
2631 | } |
2632 | |
2633 | us->syms = syms; |
2634 | us->buf = buf; |
2635 | return 0; |
2636 | |
2637 | error: |
2638 | if (err) { |
		kvfree(syms);
		kvfree(buf);
2641 | } |
2642 | return err; |
2643 | } |
2644 | |
2645 | static void kprobe_multi_put_modules(struct module **mods, u32 cnt) |
2646 | { |
2647 | u32 i; |
2648 | |
2649 | for (i = 0; i < cnt; i++) |
		module_put(mods[i]);
2651 | } |
2652 | |
2653 | static void free_user_syms(struct user_syms *us) |
2654 | { |
	kvfree(us->syms);
	kvfree(us->buf);
2657 | } |
2658 | |
2659 | static void bpf_kprobe_multi_link_release(struct bpf_link *link) |
2660 | { |
2661 | struct bpf_kprobe_multi_link *kmulti_link; |
2662 | |
2663 | kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); |
	unregister_fprobe(&kmulti_link->fp);
	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
2666 | } |
2667 | |
2668 | static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link) |
2669 | { |
2670 | struct bpf_kprobe_multi_link *kmulti_link; |
2671 | |
2672 | kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); |
	kvfree(kmulti_link->addrs);
	kvfree(kmulti_link->cookies);
	kfree(kmulti_link->mods);
	kfree(kmulti_link);
2677 | } |
2678 | |
2679 | static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link, |
2680 | struct bpf_link_info *info) |
2681 | { |
2682 | u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies); |
2683 | u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs); |
2684 | struct bpf_kprobe_multi_link *kmulti_link; |
2685 | u32 ucount = info->kprobe_multi.count; |
2686 | int err = 0, i; |
2687 | |
2688 | if (!uaddrs ^ !ucount) |
2689 | return -EINVAL; |
2690 | if (ucookies && !ucount) |
2691 | return -EINVAL; |
2692 | |
2693 | kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); |
2694 | info->kprobe_multi.count = kmulti_link->cnt; |
2695 | info->kprobe_multi.flags = kmulti_link->flags; |
2696 | info->kprobe_multi.missed = kmulti_link->fp.nmissed; |
2697 | |
2698 | if (!uaddrs) |
2699 | return 0; |
2700 | if (ucount < kmulti_link->cnt) |
2701 | err = -ENOSPC; |
2702 | else |
2703 | ucount = kmulti_link->cnt; |
2704 | |
2705 | if (ucookies) { |
2706 | if (kmulti_link->cookies) { |
			if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64)))
2708 | return -EFAULT; |
2709 | } else { |
2710 | for (i = 0; i < ucount; i++) { |
2711 | if (put_user(0, ucookies + i)) |
2712 | return -EFAULT; |
2713 | } |
2714 | } |
2715 | } |
2716 | |
2717 | if (kallsyms_show_value(current_cred())) { |
		if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
2719 | return -EFAULT; |
2720 | } else { |
2721 | for (i = 0; i < ucount; i++) { |
2722 | if (put_user(0, uaddrs + i)) |
2723 | return -EFAULT; |
2724 | } |
2725 | } |
2726 | return err; |
2727 | } |
2728 | |
2729 | static const struct bpf_link_ops bpf_kprobe_multi_link_lops = { |
2730 | .release = bpf_kprobe_multi_link_release, |
2731 | .dealloc_deferred = bpf_kprobe_multi_link_dealloc, |
2732 | .fill_link_info = bpf_kprobe_multi_link_fill_link_info, |
2733 | }; |
2734 | |
2735 | static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv) |
2736 | { |
2737 | const struct bpf_kprobe_multi_link *link = priv; |
2738 | unsigned long *addr_a = a, *addr_b = b; |
2739 | u64 *cookie_a, *cookie_b; |
2740 | |
2741 | cookie_a = link->cookies + (addr_a - link->addrs); |
2742 | cookie_b = link->cookies + (addr_b - link->addrs); |
2743 | |
2744 | /* swap addr_a/addr_b and cookie_a/cookie_b values */ |
2745 | swap(*addr_a, *addr_b); |
2746 | swap(*cookie_a, *cookie_b); |
2747 | } |
2748 | |
2749 | static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b) |
2750 | { |
2751 | const unsigned long *addr_a = a, *addr_b = b; |
2752 | |
2753 | if (*addr_a == *addr_b) |
2754 | return 0; |
2755 | return *addr_a < *addr_b ? -1 : 1; |
2756 | } |
2757 | |
2758 | static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv) |
2759 | { |
2760 | return bpf_kprobe_multi_addrs_cmp(a, b); |
2761 | } |
2762 | |
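/*
 * link->addrs is kept sorted (see bpf_kprobe_multi_link_attach()) and
 * link->cookies is permuted in lockstep, so the cookie belonging to the
 * address that fired can be found by binary search at the same index.
 */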
2763 | static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx) |
2764 | { |
2765 | struct bpf_kprobe_multi_run_ctx *run_ctx; |
2766 | struct bpf_kprobe_multi_link *link; |
2767 | u64 *cookie, entry_ip; |
2768 | unsigned long *addr; |
2769 | |
2770 | if (WARN_ON_ONCE(!ctx)) |
2771 | return 0; |
2772 | run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx); |
2773 | link = run_ctx->link; |
2774 | if (!link->cookies) |
2775 | return 0; |
2776 | entry_ip = run_ctx->entry_ip; |
	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
		       bpf_kprobe_multi_addrs_cmp);
2779 | if (!addr) |
2780 | return 0; |
2781 | cookie = link->cookies + (addr - link->addrs); |
2782 | return *cookie; |
2783 | } |
2784 | |
2785 | static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) |
2786 | { |
2787 | struct bpf_kprobe_multi_run_ctx *run_ctx; |
2788 | |
2789 | run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx); |
2790 | return run_ctx->entry_ip; |
2791 | } |
2792 | |
2793 | static int |
2794 | kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, |
2795 | unsigned long entry_ip, struct pt_regs *regs) |
2796 | { |
2797 | struct bpf_kprobe_multi_run_ctx run_ctx = { |
2798 | .link = link, |
2799 | .entry_ip = entry_ip, |
2800 | }; |
2801 | struct bpf_run_ctx *old_run_ctx; |
2802 | int err; |
2803 | |
2804 | if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { |
		bpf_prog_inc_misses_counter(link->link.prog);
2806 | err = 0; |
2807 | goto out; |
2808 | } |
2809 | |
2810 | migrate_disable(); |
2811 | rcu_read_lock(); |
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	err = bpf_prog_run(link->link.prog, regs);
	bpf_reset_run_ctx(old_run_ctx);
2815 | rcu_read_unlock(); |
2816 | migrate_enable(); |
2817 | |
2818 | out: |
2819 | __this_cpu_dec(bpf_prog_active); |
2820 | return err; |
2821 | } |
2822 | |
2823 | static int |
2824 | kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip, |
2825 | unsigned long ret_ip, struct pt_regs *regs, |
2826 | void *data) |
2827 | { |
2828 | struct bpf_kprobe_multi_link *link; |
2829 | |
2830 | link = container_of(fp, struct bpf_kprobe_multi_link, fp); |
	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
2832 | return 0; |
2833 | } |
2834 | |
2835 | static void |
2836 | kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip, |
2837 | unsigned long ret_ip, struct pt_regs *regs, |
2838 | void *data) |
2839 | { |
2840 | struct bpf_kprobe_multi_link *link; |
2841 | |
2842 | link = container_of(fp, struct bpf_kprobe_multi_link, fp); |
	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
2844 | } |
2845 | |
2846 | static int symbols_cmp_r(const void *a, const void *b, const void *priv) |
2847 | { |
2848 | const char **str_a = (const char **) a; |
2849 | const char **str_b = (const char **) b; |
2850 | |
2851 | return strcmp(*str_a, *str_b); |
2852 | } |
2853 | |
2854 | struct multi_symbols_sort { |
2855 | const char **funcs; |
2856 | u64 *cookies; |
2857 | }; |
2858 | |
2859 | static void symbols_swap_r(void *a, void *b, int size, const void *priv) |
2860 | { |
2861 | const struct multi_symbols_sort *data = priv; |
2862 | const char **name_a = a, **name_b = b; |
2863 | |
2864 | swap(*name_a, *name_b); |
2865 | |
2866 | /* If defined, swap also related cookies. */ |
2867 | if (data->cookies) { |
2868 | u64 *cookie_a, *cookie_b; |
2869 | |
2870 | cookie_a = data->cookies + (name_a - data->funcs); |
2871 | cookie_b = data->cookies + (name_b - data->funcs); |
2872 | swap(*cookie_a, *cookie_b); |
2873 | } |
2874 | } |
2875 | |
2876 | struct modules_array { |
2877 | struct module **mods; |
2878 | int mods_cnt; |
2879 | int mods_cap; |
2880 | }; |
2881 | |
2882 | static int add_module(struct modules_array *arr, struct module *mod) |
2883 | { |
2884 | struct module **mods; |
2885 | |
2886 | if (arr->mods_cnt == arr->mods_cap) { |
2887 | arr->mods_cap = max(16, arr->mods_cap * 3 / 2); |
		mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
2889 | if (!mods) |
2890 | return -ENOMEM; |
2891 | arr->mods = mods; |
2892 | } |
2893 | |
2894 | arr->mods[arr->mods_cnt] = mod; |
2895 | arr->mods_cnt++; |
2896 | return 0; |
2897 | } |
2898 | |
2899 | static bool has_module(struct modules_array *arr, struct module *mod) |
2900 | { |
2901 | int i; |
2902 | |
2903 | for (i = arr->mods_cnt - 1; i >= 0; i--) { |
2904 | if (arr->mods[i] == mod) |
2905 | return true; |
2906 | } |
2907 | return false; |
2908 | } |
2909 | |
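/*
 * Take a reference on every module that owns one of the probed addresses
 * so that none of them can be unloaded while the fprobe is registered;
 * the references are dropped again via kprobe_multi_put_modules().
 */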
2910 | static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt) |
2911 | { |
2912 | struct modules_array arr = {}; |
2913 | u32 i, err = 0; |
2914 | |
2915 | for (i = 0; i < addrs_cnt; i++) { |
2916 | struct module *mod; |
2917 | |
2918 | preempt_disable(); |
		mod = __module_address(addrs[i]);
		/* Either no module or it's already stored */
		if (!mod || has_module(&arr, mod)) {
			preempt_enable();
			continue;
		}
		if (!try_module_get(mod))
			err = -EINVAL;
		preempt_enable();
		if (err)
			break;
		err = add_module(&arr, mod);
		if (err) {
			module_put(mod);
2933 | break; |
2934 | } |
2935 | } |
2936 | |
2937 | /* We return either err < 0 in case of error, ... */ |
2938 | if (err) { |
		kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
		kfree(arr.mods);
2941 | return err; |
2942 | } |
2943 | |
2944 | /* or number of modules found if everything is ok. */ |
2945 | *mods = arr.mods; |
2946 | return arr.mods_cnt; |
2947 | } |
2948 | |
2949 | static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt) |
2950 | { |
2951 | u32 i; |
2952 | |
2953 | for (i = 0; i < cnt; i++) { |
		if (!within_error_injection_list(addrs[i]))
2955 | return -EINVAL; |
2956 | } |
2957 | return 0; |
2958 | } |
2959 | |
2960 | int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) |
2961 | { |
2962 | struct bpf_kprobe_multi_link *link = NULL; |
2963 | struct bpf_link_primer link_primer; |
2964 | void __user *ucookies; |
2965 | unsigned long *addrs; |
2966 | u32 flags, cnt, size; |
2967 | void __user *uaddrs; |
2968 | u64 *cookies = NULL; |
2969 | void __user *usyms; |
2970 | int err; |
2971 | |
2972 | /* no support for 32bit archs yet */ |
2973 | if (sizeof(u64) != sizeof(void *)) |
2974 | return -EOPNOTSUPP; |
2975 | |
2976 | if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI) |
2977 | return -EINVAL; |
2978 | |
2979 | flags = attr->link_create.kprobe_multi.flags; |
2980 | if (flags & ~BPF_F_KPROBE_MULTI_RETURN) |
2981 | return -EINVAL; |
2982 | |
2983 | uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs); |
2984 | usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms); |
2985 | if (!!uaddrs == !!usyms) |
2986 | return -EINVAL; |
2987 | |
2988 | cnt = attr->link_create.kprobe_multi.cnt; |
2989 | if (!cnt) |
2990 | return -EINVAL; |
2991 | if (cnt > MAX_KPROBE_MULTI_CNT) |
2992 | return -E2BIG; |
2993 | |
2994 | size = cnt * sizeof(*addrs); |
	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2996 | if (!addrs) |
2997 | return -ENOMEM; |
2998 | |
2999 | ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies); |
3000 | if (ucookies) { |
		cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
		if (!cookies) {
			err = -ENOMEM;
			goto error;
		}
		if (copy_from_user(cookies, ucookies, size)) {
			err = -EFAULT;
			goto error;
		}
	}

	if (uaddrs) {
		if (copy_from_user(addrs, uaddrs, size)) {
3014 | err = -EFAULT; |
3015 | goto error; |
3016 | } |
3017 | } else { |
3018 | struct multi_symbols_sort data = { |
3019 | .cookies = cookies, |
3020 | }; |
3021 | struct user_syms us; |
3022 | |
		err = copy_user_syms(&us, usyms, cnt);
3024 | if (err) |
3025 | goto error; |
3026 | |
3027 | if (cookies) |
3028 | data.funcs = us.syms; |
3029 | |
		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
		       symbols_swap_r, &data);

		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
		free_user_syms(&us);
3035 | if (err) |
3036 | goto error; |
3037 | } |
3038 | |
3039 | if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) { |
3040 | err = -EINVAL; |
3041 | goto error; |
3042 | } |
3043 | |
	link = kzalloc(sizeof(*link), GFP_KERNEL);
3045 | if (!link) { |
3046 | err = -ENOMEM; |
3047 | goto error; |
3048 | } |
3049 | |
	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
		      &bpf_kprobe_multi_link_lops, prog);

	err = bpf_link_prime(&link->link, &link_primer);
3054 | if (err) |
3055 | goto error; |
3056 | |
3057 | if (flags & BPF_F_KPROBE_MULTI_RETURN) |
3058 | link->fp.exit_handler = kprobe_multi_link_exit_handler; |
3059 | else |
3060 | link->fp.entry_handler = kprobe_multi_link_handler; |
3061 | |
3062 | link->addrs = addrs; |
3063 | link->cookies = cookies; |
3064 | link->cnt = cnt; |
3065 | link->flags = flags; |
3066 | |
3067 | if (cookies) { |
3068 | /* |
3069 | * Sorting addresses will trigger sorting cookies as well |
3070 | * (check bpf_kprobe_multi_cookie_swap). This way we can |
3071 | * find cookie based on the address in bpf_get_attach_cookie |
3072 | * helper. |
3073 | */ |
		sort_r(addrs, cnt, sizeof(*addrs),
		       bpf_kprobe_multi_cookie_cmp,
		       bpf_kprobe_multi_cookie_swap,
		       link);
3078 | } |
3079 | |
	err = get_modules_for_addrs(&link->mods, addrs, cnt);
	if (err < 0) {
		bpf_link_cleanup(&link_primer);
3083 | return err; |
3084 | } |
3085 | link->mods_cnt = err; |
3086 | |
	err = register_fprobe_ips(&link->fp, addrs, cnt);
	if (err) {
		kprobe_multi_put_modules(link->mods, link->mods_cnt);
		bpf_link_cleanup(&link_primer);
3091 | return err; |
3092 | } |
3093 | |
	return bpf_link_settle(&link_primer);
3095 | |
3096 | error: |
	kfree(link);
	kvfree(addrs);
	kvfree(cookies);
3100 | return err; |
3101 | } |
3102 | #else /* !CONFIG_FPROBE */ |
3103 | int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) |
3104 | { |
3105 | return -EOPNOTSUPP; |
3106 | } |
3107 | static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx) |
3108 | { |
3109 | return 0; |
3110 | } |
3111 | static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) |
3112 | { |
3113 | return 0; |
3114 | } |
3115 | #endif |
3116 | |
3117 | #ifdef CONFIG_UPROBES |
3118 | struct bpf_uprobe_multi_link; |
3119 | |
3120 | struct bpf_uprobe { |
3121 | struct bpf_uprobe_multi_link *link; |
3122 | loff_t offset; |
3123 | unsigned long ref_ctr_offset; |
3124 | u64 cookie; |
3125 | struct uprobe_consumer consumer; |
3126 | }; |
3127 | |
3128 | struct bpf_uprobe_multi_link { |
3129 | struct path path; |
3130 | struct bpf_link link; |
3131 | u32 cnt; |
3132 | u32 flags; |
3133 | struct bpf_uprobe *uprobes; |
3134 | struct task_struct *task; |
3135 | }; |
3136 | |
3137 | struct bpf_uprobe_multi_run_ctx { |
3138 | struct bpf_run_ctx run_ctx; |
3139 | unsigned long entry_ip; |
3140 | struct bpf_uprobe *uprobe; |
3141 | }; |
3142 | |
3143 | static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes, |
3144 | u32 cnt) |
3145 | { |
3146 | u32 i; |
3147 | |
3148 | for (i = 0; i < cnt; i++) { |
		uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset,
				  &uprobes[i].consumer);
3151 | } |
3152 | } |
3153 | |
3154 | static void bpf_uprobe_multi_link_release(struct bpf_link *link) |
3155 | { |
3156 | struct bpf_uprobe_multi_link *umulti_link; |
3157 | |
3158 | umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); |
	bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
	if (umulti_link->task)
		put_task_struct(umulti_link->task);
	path_put(&umulti_link->path);
3163 | } |
3164 | |
3165 | static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link) |
3166 | { |
3167 | struct bpf_uprobe_multi_link *umulti_link; |
3168 | |
3169 | umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); |
	kvfree(umulti_link->uprobes);
	kfree(umulti_link);
3172 | } |
3173 | |
3174 | static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link, |
3175 | struct bpf_link_info *info) |
3176 | { |
3177 | u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets); |
3178 | u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies); |
3179 | u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets); |
3180 | u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path); |
3181 | u32 upath_size = info->uprobe_multi.path_size; |
3182 | struct bpf_uprobe_multi_link *umulti_link; |
3183 | u32 ucount = info->uprobe_multi.count; |
3184 | int err = 0, i; |
3185 | long left; |
3186 | |
3187 | if (!upath ^ !upath_size) |
3188 | return -EINVAL; |
3189 | |
3190 | if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount) |
3191 | return -EINVAL; |
3192 | |
3193 | umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); |
3194 | info->uprobe_multi.count = umulti_link->cnt; |
3195 | info->uprobe_multi.flags = umulti_link->flags; |
3196 | info->uprobe_multi.pid = umulti_link->task ? |
			task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
3198 | |
3199 | if (upath) { |
3200 | char *p, *buf; |
3201 | |
3202 | upath_size = min_t(u32, upath_size, PATH_MAX); |
3203 | |
		buf = kmalloc(upath_size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		p = d_path(&umulti_link->path, buf, upath_size);
		if (IS_ERR(p)) {
			kfree(buf);
			return PTR_ERR(p);
3211 | } |
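		/*
		 * d_path() fills in the path from the end of the buffer and
		 * returns a pointer into it, so the number of bytes to copy
		 * (including the trailing NUL) is the distance from that
		 * pointer to the end of the buffer.
		 */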
3212 | upath_size = buf + upath_size - p; |
		left = copy_to_user(upath, p, upath_size);
		kfree(buf);
3215 | if (left) |
3216 | return -EFAULT; |
3217 | info->uprobe_multi.path_size = upath_size; |
3218 | } |
3219 | |
3220 | if (!uoffsets && !ucookies && !uref_ctr_offsets) |
3221 | return 0; |
3222 | |
3223 | if (ucount < umulti_link->cnt) |
3224 | err = -ENOSPC; |
3225 | else |
3226 | ucount = umulti_link->cnt; |
3227 | |
3228 | for (i = 0; i < ucount; i++) { |
3229 | if (uoffsets && |
3230 | put_user(umulti_link->uprobes[i].offset, uoffsets + i)) |
3231 | return -EFAULT; |
3232 | if (uref_ctr_offsets && |
3233 | put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) |
3234 | return -EFAULT; |
3235 | if (ucookies && |
3236 | put_user(umulti_link->uprobes[i].cookie, ucookies + i)) |
3237 | return -EFAULT; |
3238 | } |
3239 | |
3240 | return err; |
3241 | } |
3242 | |
3243 | static const struct bpf_link_ops bpf_uprobe_multi_link_lops = { |
3244 | .release = bpf_uprobe_multi_link_release, |
3245 | .dealloc_deferred = bpf_uprobe_multi_link_dealloc, |
3246 | .fill_link_info = bpf_uprobe_multi_link_fill_link_info, |
3247 | }; |
3248 | |
3249 | static int uprobe_prog_run(struct bpf_uprobe *uprobe, |
3250 | unsigned long entry_ip, |
3251 | struct pt_regs *regs) |
3252 | { |
3253 | struct bpf_uprobe_multi_link *link = uprobe->link; |
3254 | struct bpf_uprobe_multi_run_ctx run_ctx = { |
3255 | .entry_ip = entry_ip, |
3256 | .uprobe = uprobe, |
3257 | }; |
3258 | struct bpf_prog *prog = link->link.prog; |
3259 | bool sleepable = prog->sleepable; |
3260 | struct bpf_run_ctx *old_run_ctx; |
3261 | int err = 0; |
3262 | |
3263 | if (link->task && current != link->task) |
3264 | return 0; |
3265 | |
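	/*
	 * Sleepable programs may block, so they run under RCU-tasks-trace
	 * protection rather than a plain RCU read-side section.
	 */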
3266 | if (sleepable) |
3267 | rcu_read_lock_trace(); |
3268 | else |
3269 | rcu_read_lock(); |
3270 | |
3271 | migrate_disable(); |
3272 | |
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	err = bpf_prog_run(link->link.prog, regs);
	bpf_reset_run_ctx(old_run_ctx);
3276 | |
3277 | migrate_enable(); |
3278 | |
3279 | if (sleepable) |
3280 | rcu_read_unlock_trace(); |
3281 | else |
3282 | rcu_read_unlock(); |
3283 | return err; |
3284 | } |
3285 | |
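/*
 * For links created with a target pid, this consumer filter restricts
 * breakpoint installation to address spaces that match the target task's
 * mm, so the uprobe never fires in unrelated processes.
 */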
static bool
uprobe_multi_link_filter(struct uprobe_consumer *con, enum uprobe_filter_ctx ctx,
			 struct mm_struct *mm)
{
	struct bpf_uprobe *uprobe;

	uprobe = container_of(con, struct bpf_uprobe, consumer);
	return uprobe->link->task->mm == mm;
}

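/*
 * Entry handler: the probed address itself is the entry IP.  Return
 * handler: the uprobe layer passes the address of the probed function
 * in @func, so both flavors report the function entry, not the return
 * site.
 */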
static int
uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct bpf_uprobe *uprobe;

	uprobe = container_of(con, struct bpf_uprobe, consumer);
	return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
}

static int
uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
{
	struct bpf_uprobe *uprobe;

	uprobe = container_of(con, struct bpf_uprobe, consumer);
	return uprobe_prog_run(uprobe, func, regs);
}

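/*
 * Accessors for the per-hit run context; these back the
 * bpf_get_func_ip() and bpf_get_attach_cookie() helpers for
 * uprobe_multi programs.
 */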
static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	struct bpf_uprobe_multi_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
	return run_ctx->entry_ip;
}

static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	struct bpf_uprobe_multi_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
	return run_ctx->uprobe->cookie;
}

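/*
 * Attach a BPF program to multiple uprobes on a single binary.  A rough
 * sketch of the userspace side, assuming a raw bpf(2) call (path and
 * offsets values are illustrative, error handling omitted):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd = prog_fd;
 *	attr.link_create.attach_type = BPF_TRACE_UPROBE_MULTI;
 *	attr.link_create.uprobe_multi.path = (__u64)(unsigned long)"/bin/bash";
 *	attr.link_create.uprobe_multi.offsets = (__u64)(unsigned long)offsets;
 *	attr.link_create.uprobe_multi.cnt = cnt;
 *
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 */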
int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_uprobe_multi_link *link = NULL;
	unsigned long __user *uref_ctr_offsets;
	struct bpf_link_primer link_primer;
	struct bpf_uprobe *uprobes = NULL;
	struct task_struct *task = NULL;
	unsigned long __user *uoffsets;
	u64 __user *ucookies;
	void __user *upath;
	u32 flags, cnt, i;
	struct path path;
	char *name;
	pid_t pid;
	int err;

	/* no support for 32bit archs yet */
	if (sizeof(u64) != sizeof(void *))
		return -EOPNOTSUPP;

	if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI)
		return -EINVAL;

	flags = attr->link_create.uprobe_multi.flags;
	if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
		return -EINVAL;

	/*
	 * path, offsets and cnt are mandatory,
	 * ref_ctr_offsets and cookies are optional
	 */
	upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
	uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
	cnt = attr->link_create.uprobe_multi.cnt;

	if (!upath || !uoffsets || !cnt)
		return -EINVAL;
	if (cnt > MAX_UPROBE_MULTI_CNT)
		return -E2BIG;

	uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
	ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);

	name = strndup_user(upath, PATH_MAX);
	if (IS_ERR(name)) {
		err = PTR_ERR(name);
		return err;
	}

	err = kern_path(name, LOOKUP_FOLLOW, &path);
	kfree(name);
	if (err)
		return err;

	if (!d_is_reg(path.dentry)) {
		err = -EBADF;
		goto error_path_put;
	}

	pid = attr->link_create.uprobe_multi.pid;
	if (pid) {
		rcu_read_lock();
		task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
		rcu_read_unlock();
		if (!task) {
			err = -ESRCH;
			goto error_path_put;
		}
	}

	err = -ENOMEM;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);

	if (!uprobes || !link)
		goto error_free;

	for (i = 0; i < cnt; i++) {
		if (__get_user(uprobes[i].offset, uoffsets + i)) {
			err = -EFAULT;
			goto error_free;
		}
		if (uprobes[i].offset < 0) {
			err = -EINVAL;
			goto error_free;
		}
		if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
			err = -EFAULT;
			goto error_free;
		}
		if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
			err = -EFAULT;
			goto error_free;
		}

		uprobes[i].link = link;

		if (flags & BPF_F_UPROBE_MULTI_RETURN)
			uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
		else
			uprobes[i].consumer.handler = uprobe_multi_link_handler;

		if (pid)
			uprobes[i].consumer.filter = uprobe_multi_link_filter;
	}

	link->cnt = cnt;
	link->uprobes = uprobes;
	link->path = path;
	link->task = task;
	link->flags = flags;

	bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
		      &bpf_uprobe_multi_link_lops, prog);

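	/*
	 * Register all consumers.  If any registration fails, unwind the
	 * first i already-registered uprobes before bailing out.
	 */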
	for (i = 0; i < cnt; i++) {
		err = uprobe_register_refctr(d_real_inode(link->path.dentry),
					     uprobes[i].offset,
					     uprobes[i].ref_ctr_offset,
					     &uprobes[i].consumer);
		if (err) {
			bpf_uprobe_unregister(&path, uprobes, i);
			goto error_free;
		}
	}

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto error_free;

	return bpf_link_settle(&link_primer);

error_free:
	kvfree(uprobes);
	kfree(link);
	if (task)
		put_task_struct(task);
error_path_put:
	path_put(&path);
	return err;
}
#else /* !CONFIG_UPROBES */
int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
{
	return 0;
}
static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
{
	return 0;
}
#endif /* CONFIG_UPROBES */
