1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Fprobe-based tracing events |
4 | * Copyright (C) 2022 Google LLC. |
5 | */ |
6 | #define pr_fmt(fmt) "trace_fprobe: " fmt |
7 | #include <asm/ptrace.h> |
8 | |
9 | #include <linux/fprobe.h> |
10 | #include <linux/module.h> |
11 | #include <linux/rculist.h> |
12 | #include <linux/security.h> |
13 | #include <linux/tracepoint.h> |
14 | #include <linux/uaccess.h> |
15 | |
16 | #include "trace_dynevent.h" |
17 | #include "trace_probe.h" |
18 | #include "trace_probe_kernel.h" |
19 | #include "trace_probe_tmpl.h" |
20 | |
21 | #define FPROBE_EVENT_SYSTEM "fprobes" |
22 | #define TRACEPOINT_EVENT_SYSTEM "tracepoints" |
23 | #define RETHOOK_MAXACTIVE_MAX 4096 |
24 | #define TRACEPOINT_STUB ERR_PTR(-ENOENT) |
25 | |
/* Forward declarations for the dyn_event operations implemented below. */
static int trace_fprobe_create(const char *raw_command);
static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_fprobe_release(struct dyn_event *ev);
static bool trace_fprobe_is_busy(struct dyn_event *ev);
static bool trace_fprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

/* Hooks registering fprobe events with the dynamic-event framework. */
static struct dyn_event_operations trace_fprobe_ops = {
	.create = trace_fprobe_create,
	.show = trace_fprobe_show,
	.is_busy = trace_fprobe_is_busy,
	.free = trace_fprobe_release,
	.match = trace_fprobe_match,
};
40 | |
41 | /* |
42 | * Fprobe event core functions |
43 | */ |
struct trace_fprobe {
	struct dyn_event devent;	/* linkage into the dyn_event list */
	struct fprobe fp;		/* underlying fprobe (entry/exit handlers) */
	const char *symbol;		/* probed symbol or tracepoint name (kstrdup'd) */
	struct tracepoint *tpoint;	/* target tracepoint, TRACEPOINT_STUB if not loaded, or NULL */
	struct module *mod;		/* module owning the tracepoint, if any */
	struct trace_probe tp;		/* must be last: allocated with struct_size(tf, tp.args, nargs) */
};
52 | |
/* True if @ev is an fprobe-based dynamic event. */
static bool is_trace_fprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_fprobe_ops;
}
57 | |
/* Convert a generic dyn_event back to its containing trace_fprobe. */
static struct trace_fprobe *to_trace_fprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_fprobe, devent);
}
62 | |
63 | /** |
64 | * for_each_trace_fprobe - iterate over the trace_fprobe list |
65 | * @pos: the struct trace_fprobe * for each entry |
66 | * @dpos: the struct dyn_event * to use as a loop cursor |
67 | */ |
68 | #define for_each_trace_fprobe(pos, dpos) \ |
69 | for_each_dyn_event(dpos) \ |
70 | if (is_trace_fprobe(dpos) && (pos = to_trace_fprobe(dpos))) |
71 | |
/* An exit handler is set only for function-exit (fexit) probes. */
static bool trace_fprobe_is_return(struct trace_fprobe *tf)
{
	return tf->fp.exit_handler != NULL;
}
76 | |
/*
 * True if this probe targets a tracepoint rather than a plain function.
 * Note: tpoint may be TRACEPOINT_STUB (a non-NULL ERR_PTR), which still
 * counts as a tracepoint probe.
 */
static bool trace_fprobe_is_tracepoint(struct trace_fprobe *tf)
{
	return tf->tpoint != NULL;
}
81 | |
82 | static const char *trace_fprobe_symbol(struct trace_fprobe *tf) |
83 | { |
84 | return tf->symbol ? tf->symbol : "unknown" ; |
85 | } |
86 | |
87 | static bool trace_fprobe_is_busy(struct dyn_event *ev) |
88 | { |
89 | struct trace_fprobe *tf = to_trace_fprobe(ev); |
90 | |
91 | return trace_probe_is_enabled(tp: &tf->tp); |
92 | } |
93 | |
94 | static bool trace_fprobe_match_command_head(struct trace_fprobe *tf, |
95 | int argc, const char **argv) |
96 | { |
97 | char buf[MAX_ARGSTR_LEN + 1]; |
98 | |
99 | if (!argc) |
100 | return true; |
101 | |
102 | snprintf(buf, size: sizeof(buf), fmt: "%s" , trace_fprobe_symbol(tf)); |
103 | if (strcmp(buf, argv[0])) |
104 | return false; |
105 | argc--; argv++; |
106 | |
107 | return trace_probe_match_command_args(tp: &tf->tp, argc, argv); |
108 | } |
109 | |
110 | static bool trace_fprobe_match(const char *system, const char *event, |
111 | int argc, const char **argv, struct dyn_event *ev) |
112 | { |
113 | struct trace_fprobe *tf = to_trace_fprobe(ev); |
114 | |
115 | if (event[0] != '\0' && strcmp(trace_probe_name(tp: &tf->tp), event)) |
116 | return false; |
117 | |
118 | if (system && strcmp(trace_probe_group_name(tp: &tf->tp), system)) |
119 | return false; |
120 | |
121 | return trace_fprobe_match_command_head(tf, argc, argv); |
122 | } |
123 | |
124 | static bool trace_fprobe_is_registered(struct trace_fprobe *tf) |
125 | { |
126 | return fprobe_is_registered(fp: &tf->fp); |
127 | } |
128 | |
129 | /* |
130 | * Note that we don't verify the fetch_insn code, since it does not come |
131 | * from user space. |
132 | */ |
133 | static int |
134 | process_fetch_insn(struct fetch_insn *code, void *rec, void *edata, |
135 | void *dest, void *base) |
136 | { |
137 | struct ftrace_regs *fregs = rec; |
138 | unsigned long val; |
139 | int ret; |
140 | |
141 | retry: |
142 | /* 1st stage: get value from context */ |
143 | switch (code->op) { |
144 | case FETCH_OP_STACK: |
145 | val = ftrace_regs_get_kernel_stack_nth(fregs, nth: code->param); |
146 | break; |
147 | case FETCH_OP_STACKP: |
148 | val = ftrace_regs_get_stack_pointer(fregs); |
149 | break; |
150 | case FETCH_OP_RETVAL: |
151 | val = ftrace_regs_get_return_value(fregs); |
152 | break; |
153 | #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API |
154 | case FETCH_OP_ARG: |
155 | val = ftrace_regs_get_argument(fregs, code->param); |
156 | break; |
157 | case FETCH_OP_EDATA: |
158 | val = *(unsigned long *)((unsigned long)edata + code->offset); |
159 | break; |
160 | #endif |
161 | case FETCH_NOP_SYMBOL: /* Ignore a place holder */ |
162 | code++; |
163 | goto retry; |
164 | default: |
165 | ret = process_common_fetch_insn(code, val: &val); |
166 | if (ret < 0) |
167 | return ret; |
168 | } |
169 | code++; |
170 | |
171 | return process_fetch_insn_bottom(code, val, dest, base); |
172 | } |
173 | NOKPROBE_SYMBOL(process_fetch_insn) |
174 | |
175 | /* function entry handler */ |
176 | static nokprobe_inline void |
177 | __fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip, |
178 | struct ftrace_regs *fregs, |
179 | struct trace_event_file *trace_file) |
180 | { |
181 | struct fentry_trace_entry_head *entry; |
182 | struct trace_event_call *call = trace_probe_event_call(tp: &tf->tp); |
183 | struct trace_event_buffer fbuffer; |
184 | int dsize; |
185 | |
186 | if (WARN_ON_ONCE(call != trace_file->event_call)) |
187 | return; |
188 | |
189 | if (trace_trigger_soft_disabled(file: trace_file)) |
190 | return; |
191 | |
192 | dsize = __get_data_size(tp: &tf->tp, regs: fregs, NULL); |
193 | |
194 | entry = trace_event_buffer_reserve(fbuffer: &fbuffer, trace_file, |
195 | len: sizeof(*entry) + tf->tp.size + dsize); |
196 | if (!entry) |
197 | return; |
198 | |
199 | fbuffer.regs = ftrace_get_regs(fregs); |
200 | entry = fbuffer.entry = ring_buffer_event_data(event: fbuffer.event); |
201 | entry->ip = entry_ip; |
202 | store_trace_args(data: &entry[1], tp: &tf->tp, rec: fregs, NULL, header_size: sizeof(*entry), maxlen: dsize); |
203 | |
204 | trace_event_buffer_commit(fbuffer: &fbuffer); |
205 | } |
206 | |
207 | static void |
208 | fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip, |
209 | struct ftrace_regs *fregs) |
210 | { |
211 | struct event_file_link *link; |
212 | |
213 | trace_probe_for_each_link_rcu(link, &tf->tp) |
214 | __fentry_trace_func(tf, entry_ip, fregs, trace_file: link->file); |
215 | } |
216 | NOKPROBE_SYMBOL(fentry_trace_func); |
217 | |
/*
 * Run the pre-compiled entry-argument program (tp->entry_arg) and save the
 * fetched argument values into @edata, so the exit handler can reference
 * entry-time argument values ($arg* in a fexit probe).
 */
static nokprobe_inline
void store_fprobe_entry_data(void *edata, struct trace_probe *tp, struct ftrace_regs *fregs)
{
	struct probe_entry_arg *earg = tp->entry_arg;
	unsigned long val = 0;
	int i;

	if (!earg)
		return;

	for (i = 0; i < earg->size; i++) {
		struct fetch_insn *code = &earg->code[i];

		switch (code->op) {
		case FETCH_OP_ARG:
			/* Load the code->param-th function argument. */
			val = ftrace_regs_get_argument(fregs, code->param);
			break;
		case FETCH_OP_ST_EDATA:
			/* Store the most recently loaded value at this offset. */
			*(unsigned long *)((unsigned long)edata + code->offset) = val;
			break;
		case FETCH_OP_END:
			goto end;
		default:
			break;
		}
	}
end:
	return;
}
247 | |
248 | /* function exit handler */ |
249 | static int trace_fprobe_entry_handler(struct fprobe *fp, unsigned long entry_ip, |
250 | unsigned long ret_ip, struct ftrace_regs *fregs, |
251 | void *entry_data) |
252 | { |
253 | struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp); |
254 | |
255 | if (tf->tp.entry_arg) |
256 | store_fprobe_entry_data(edata: entry_data, tp: &tf->tp, fregs); |
257 | |
258 | return 0; |
259 | } |
260 | NOKPROBE_SYMBOL(trace_fprobe_entry_handler) |
261 | |
262 | static nokprobe_inline void |
263 | __fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip, |
264 | unsigned long ret_ip, struct ftrace_regs *fregs, |
265 | void *entry_data, struct trace_event_file *trace_file) |
266 | { |
267 | struct fexit_trace_entry_head *entry; |
268 | struct trace_event_buffer fbuffer; |
269 | struct trace_event_call *call = trace_probe_event_call(tp: &tf->tp); |
270 | int dsize; |
271 | |
272 | if (WARN_ON_ONCE(call != trace_file->event_call)) |
273 | return; |
274 | |
275 | if (trace_trigger_soft_disabled(file: trace_file)) |
276 | return; |
277 | |
278 | dsize = __get_data_size(tp: &tf->tp, regs: fregs, edata: entry_data); |
279 | |
280 | entry = trace_event_buffer_reserve(fbuffer: &fbuffer, trace_file, |
281 | len: sizeof(*entry) + tf->tp.size + dsize); |
282 | if (!entry) |
283 | return; |
284 | |
285 | fbuffer.regs = ftrace_get_regs(fregs); |
286 | entry = fbuffer.entry = ring_buffer_event_data(event: fbuffer.event); |
287 | entry->func = entry_ip; |
288 | entry->ret_ip = ret_ip; |
289 | store_trace_args(data: &entry[1], tp: &tf->tp, rec: fregs, edata: entry_data, header_size: sizeof(*entry), maxlen: dsize); |
290 | |
291 | trace_event_buffer_commit(fbuffer: &fbuffer); |
292 | } |
293 | |
294 | static void |
295 | fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip, |
296 | unsigned long ret_ip, struct ftrace_regs *fregs, void *entry_data) |
297 | { |
298 | struct event_file_link *link; |
299 | |
300 | trace_probe_for_each_link_rcu(link, &tf->tp) |
301 | __fexit_trace_func(tf, entry_ip, ret_ip, fregs, entry_data, trace_file: link->file); |
302 | } |
303 | NOKPROBE_SYMBOL(fexit_trace_func); |
304 | |
305 | #ifdef CONFIG_PERF_EVENTS |
306 | |
307 | static int fentry_perf_func(struct trace_fprobe *tf, unsigned long entry_ip, |
308 | struct ftrace_regs *fregs) |
309 | { |
310 | struct trace_event_call *call = trace_probe_event_call(tp: &tf->tp); |
311 | struct fentry_trace_entry_head *entry; |
312 | struct hlist_head *head; |
313 | int size, __size, dsize; |
314 | struct pt_regs *regs; |
315 | int rctx; |
316 | |
317 | head = this_cpu_ptr(call->perf_events); |
318 | if (hlist_empty(h: head)) |
319 | return 0; |
320 | |
321 | dsize = __get_data_size(tp: &tf->tp, regs: fregs, NULL); |
322 | __size = sizeof(*entry) + tf->tp.size + dsize; |
323 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
324 | size -= sizeof(u32); |
325 | |
326 | entry = perf_trace_buf_alloc(size, regs: ®s, rctxp: &rctx); |
327 | if (!entry) |
328 | return 0; |
329 | |
330 | regs = ftrace_fill_perf_regs(fregs, regs); |
331 | |
332 | entry->ip = entry_ip; |
333 | memset(&entry[1], 0, dsize); |
334 | store_trace_args(data: &entry[1], tp: &tf->tp, rec: fregs, NULL, header_size: sizeof(*entry), maxlen: dsize); |
335 | perf_trace_buf_submit(raw_data: entry, size, rctx, type: call->event.type, count: 1, regs, |
336 | head, NULL); |
337 | return 0; |
338 | } |
339 | NOKPROBE_SYMBOL(fentry_perf_func); |
340 | |
341 | static void |
342 | fexit_perf_func(struct trace_fprobe *tf, unsigned long entry_ip, |
343 | unsigned long ret_ip, struct ftrace_regs *fregs, |
344 | void *entry_data) |
345 | { |
346 | struct trace_event_call *call = trace_probe_event_call(tp: &tf->tp); |
347 | struct fexit_trace_entry_head *entry; |
348 | struct hlist_head *head; |
349 | int size, __size, dsize; |
350 | struct pt_regs *regs; |
351 | int rctx; |
352 | |
353 | head = this_cpu_ptr(call->perf_events); |
354 | if (hlist_empty(h: head)) |
355 | return; |
356 | |
357 | dsize = __get_data_size(tp: &tf->tp, regs: fregs, edata: entry_data); |
358 | __size = sizeof(*entry) + tf->tp.size + dsize; |
359 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
360 | size -= sizeof(u32); |
361 | |
362 | entry = perf_trace_buf_alloc(size, regs: ®s, rctxp: &rctx); |
363 | if (!entry) |
364 | return; |
365 | |
366 | regs = ftrace_fill_perf_regs(fregs, regs); |
367 | |
368 | entry->func = entry_ip; |
369 | entry->ret_ip = ret_ip; |
370 | store_trace_args(data: &entry[1], tp: &tf->tp, rec: fregs, edata: entry_data, header_size: sizeof(*entry), maxlen: dsize); |
371 | perf_trace_buf_submit(raw_data: entry, size, rctx, type: call->event.type, count: 1, regs, |
372 | head, NULL); |
373 | } |
374 | NOKPROBE_SYMBOL(fexit_perf_func); |
375 | #endif /* CONFIG_PERF_EVENTS */ |
376 | |
377 | static int fentry_dispatcher(struct fprobe *fp, unsigned long entry_ip, |
378 | unsigned long ret_ip, struct ftrace_regs *fregs, |
379 | void *entry_data) |
380 | { |
381 | struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp); |
382 | int ret = 0; |
383 | |
384 | if (trace_probe_test_flag(tp: &tf->tp, TP_FLAG_TRACE)) |
385 | fentry_trace_func(tf, entry_ip, fregs); |
386 | |
387 | #ifdef CONFIG_PERF_EVENTS |
388 | if (trace_probe_test_flag(tp: &tf->tp, TP_FLAG_PROFILE)) |
389 | ret = fentry_perf_func(tf, entry_ip, fregs); |
390 | #endif |
391 | return ret; |
392 | } |
393 | NOKPROBE_SYMBOL(fentry_dispatcher); |
394 | |
395 | static void fexit_dispatcher(struct fprobe *fp, unsigned long entry_ip, |
396 | unsigned long ret_ip, struct ftrace_regs *fregs, |
397 | void *entry_data) |
398 | { |
399 | struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp); |
400 | |
401 | if (trace_probe_test_flag(tp: &tf->tp, TP_FLAG_TRACE)) |
402 | fexit_trace_func(tf, entry_ip, ret_ip, fregs, entry_data); |
403 | #ifdef CONFIG_PERF_EVENTS |
404 | if (trace_probe_test_flag(tp: &tf->tp, TP_FLAG_PROFILE)) |
405 | fexit_perf_func(tf, entry_ip, ret_ip, fregs, entry_data); |
406 | #endif |
407 | } |
408 | NOKPROBE_SYMBOL(fexit_dispatcher); |
409 | |
410 | static void free_trace_fprobe(struct trace_fprobe *tf) |
411 | { |
412 | if (tf) { |
413 | trace_probe_cleanup(tp: &tf->tp); |
414 | kfree(objp: tf->symbol); |
415 | kfree(objp: tf); |
416 | } |
417 | } |
418 | |
/*
 * Scoped-cleanup (__free) helper for trace_fprobe pointers. Since
 * alloc_trace_fprobe() can return an ERR_PTR, check the pointer is not an
 * error value before freeing.
 */
DEFINE_FREE(free_trace_fprobe, struct trace_fprobe *, if (!IS_ERR_OR_NULL(_T)) free_trace_fprobe(_T))
421 | |
422 | /* |
423 | * Allocate new trace_probe and initialize it (including fprobe). |
424 | */ |
425 | static struct trace_fprobe *alloc_trace_fprobe(const char *group, |
426 | const char *event, |
427 | const char *symbol, |
428 | struct tracepoint *tpoint, |
429 | struct module *mod, |
430 | int nargs, bool is_return) |
431 | { |
432 | struct trace_fprobe *tf __free(free_trace_fprobe) = NULL; |
433 | int ret = -ENOMEM; |
434 | |
435 | tf = kzalloc(struct_size(tf, tp.args, nargs), GFP_KERNEL); |
436 | if (!tf) |
437 | return ERR_PTR(error: ret); |
438 | |
439 | tf->symbol = kstrdup(s: symbol, GFP_KERNEL); |
440 | if (!tf->symbol) |
441 | return ERR_PTR(error: -ENOMEM); |
442 | |
443 | if (is_return) |
444 | tf->fp.exit_handler = fexit_dispatcher; |
445 | else |
446 | tf->fp.entry_handler = fentry_dispatcher; |
447 | |
448 | tf->tpoint = tpoint; |
449 | tf->mod = mod; |
450 | |
451 | ret = trace_probe_init(tp: &tf->tp, event, group, alloc_filter: false, nargs); |
452 | if (ret < 0) |
453 | return ERR_PTR(error: ret); |
454 | |
455 | dyn_event_init(ev: &tf->devent, ops: &trace_fprobe_ops); |
456 | return_ptr(tf); |
457 | } |
458 | |
459 | static struct trace_fprobe *find_trace_fprobe(const char *event, |
460 | const char *group) |
461 | { |
462 | struct dyn_event *pos; |
463 | struct trace_fprobe *tf; |
464 | |
465 | for_each_trace_fprobe(tf, pos) |
466 | if (strcmp(trace_probe_name(tp: &tf->tp), event) == 0 && |
467 | strcmp(trace_probe_group_name(tp: &tf->tp), group) == 0) |
468 | return tf; |
469 | return NULL; |
470 | } |
471 | |
472 | static inline int __enable_trace_fprobe(struct trace_fprobe *tf) |
473 | { |
474 | if (trace_fprobe_is_registered(tf)) |
475 | enable_fprobe(fp: &tf->fp); |
476 | |
477 | return 0; |
478 | } |
479 | |
480 | static void __disable_trace_fprobe(struct trace_probe *tp) |
481 | { |
482 | struct trace_fprobe *tf; |
483 | |
484 | list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) { |
485 | if (!trace_fprobe_is_registered(tf)) |
486 | continue; |
487 | disable_fprobe(fp: &tf->fp); |
488 | } |
489 | } |
490 | |
491 | /* |
492 | * Enable trace_probe |
493 | * if the file is NULL, enable "perf" handler, or enable "trace" handler. |
494 | */ |
static int enable_trace_fprobe(struct trace_event_call *call,
			       struct trace_event_file *file)
{
	struct trace_probe *tp;
	struct trace_fprobe *tf;
	bool enabled;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	/* Snapshot the state before the flag/file update below changes it. */
	enabled = trace_probe_is_enabled(tp);

	/* This also changes "enabled" state */
	if (file) {
		ret = trace_probe_add_file(tp, file);
		if (ret)
			return ret;
	} else
		trace_probe_set_flag(tp, TP_FLAG_PROFILE);

	/* Only enable the fprobes on the first enablement. */
	if (!enabled) {
		list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
			/* TODO: check the fprobe is gone */
			__enable_trace_fprobe(tf);
		}
	}

	return 0;
}
525 | |
526 | /* |
527 | * Disable trace_probe |
528 | * if the file is NULL, disable "perf" handler, or disable "trace" handler. |
529 | */ |
static int disable_trace_fprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (file) {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		/* Keep TP_FLAG_TRACE while other event files remain linked. */
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	/* Only disable the fprobes once both trace and perf are off. */
	if (!trace_probe_is_enabled(tp))
		__disable_trace_fprobe(tp);

 out:
	if (file)
		/*
		 * Synchronization is done in below function. For perf event,
		 * file == NULL and perf_trace_event_unreg() calls
		 * tracepoint_synchronize_unregister() to ensure synchronize
		 * event. We don't need to care about it.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}
563 | |
564 | /* Event entry printers */ |
565 | static enum print_line_t |
566 | print_fentry_event(struct trace_iterator *iter, int flags, |
567 | struct trace_event *event) |
568 | { |
569 | struct fentry_trace_entry_head *field; |
570 | struct trace_seq *s = &iter->seq; |
571 | struct trace_probe *tp; |
572 | |
573 | field = (struct fentry_trace_entry_head *)iter->ent; |
574 | tp = trace_probe_primary_from_call( |
575 | container_of(event, struct trace_event_call, event)); |
576 | if (WARN_ON_ONCE(!tp)) |
577 | goto out; |
578 | |
579 | trace_seq_printf(s, fmt: "%s: (" , trace_probe_name(tp)); |
580 | |
581 | if (!seq_print_ip_sym(s, ip: field->ip, sym_flags: flags | TRACE_ITER_SYM_OFFSET)) |
582 | goto out; |
583 | |
584 | trace_seq_putc(s, c: ')'); |
585 | |
586 | if (trace_probe_print_args(s, args: tp->args, nr_args: tp->nr_args, |
587 | data: (u8 *)&field[1], field) < 0) |
588 | goto out; |
589 | |
590 | trace_seq_putc(s, c: '\n'); |
591 | out: |
592 | return trace_handle_return(s); |
593 | } |
594 | |
595 | static enum print_line_t |
596 | print_fexit_event(struct trace_iterator *iter, int flags, |
597 | struct trace_event *event) |
598 | { |
599 | struct fexit_trace_entry_head *field; |
600 | struct trace_seq *s = &iter->seq; |
601 | struct trace_probe *tp; |
602 | |
603 | field = (struct fexit_trace_entry_head *)iter->ent; |
604 | tp = trace_probe_primary_from_call( |
605 | container_of(event, struct trace_event_call, event)); |
606 | if (WARN_ON_ONCE(!tp)) |
607 | goto out; |
608 | |
609 | trace_seq_printf(s, fmt: "%s: (" , trace_probe_name(tp)); |
610 | |
611 | if (!seq_print_ip_sym(s, ip: field->ret_ip, sym_flags: flags | TRACE_ITER_SYM_OFFSET)) |
612 | goto out; |
613 | |
614 | trace_seq_puts(s, str: " <- " ); |
615 | |
616 | if (!seq_print_ip_sym(s, ip: field->func, sym_flags: flags & ~TRACE_ITER_SYM_OFFSET)) |
617 | goto out; |
618 | |
619 | trace_seq_putc(s, c: ')'); |
620 | |
621 | if (trace_probe_print_args(s, args: tp->args, nr_args: tp->nr_args, |
622 | data: (u8 *)&field[1], field) < 0) |
623 | goto out; |
624 | |
625 | trace_seq_putc(s, c: '\n'); |
626 | |
627 | out: |
628 | return trace_handle_return(s); |
629 | } |
630 | |
631 | static int fentry_event_define_fields(struct trace_event_call *event_call) |
632 | { |
633 | int ret; |
634 | struct fentry_trace_entry_head field; |
635 | struct trace_probe *tp; |
636 | |
637 | tp = trace_probe_primary_from_call(call: event_call); |
638 | if (WARN_ON_ONCE(!tp)) |
639 | return -ENOENT; |
640 | |
641 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); |
642 | |
643 | return traceprobe_define_arg_fields(event_call, offset: sizeof(field), tp); |
644 | } |
645 | |
646 | static int fexit_event_define_fields(struct trace_event_call *event_call) |
647 | { |
648 | int ret; |
649 | struct fexit_trace_entry_head field; |
650 | struct trace_probe *tp; |
651 | |
652 | tp = trace_probe_primary_from_call(call: event_call); |
653 | if (WARN_ON_ONCE(!tp)) |
654 | return -ENOENT; |
655 | |
656 | DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); |
657 | DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); |
658 | |
659 | return traceprobe_define_arg_fields(event_call, offset: sizeof(field), tp); |
660 | } |
661 | |
/* Output-formatting callbacks for entry and exit events. */
static struct trace_event_functions fentry_funcs = {
	.trace = print_fentry_event
};

static struct trace_event_functions fexit_funcs = {
	.trace = print_fexit_event
};

/* Field layouts are defined dynamically, per probe, via callbacks. */
static struct trace_event_fields fentry_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = fentry_event_define_fields },
	{}
};

static struct trace_event_fields fexit_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = fexit_event_define_fields },
	{}
};
681 | |
682 | static int fprobe_register(struct trace_event_call *event, |
683 | enum trace_reg type, void *data); |
684 | |
685 | static inline void init_trace_event_call(struct trace_fprobe *tf) |
686 | { |
687 | struct trace_event_call *call = trace_probe_event_call(tp: &tf->tp); |
688 | |
689 | if (trace_fprobe_is_return(tf)) { |
690 | call->event.funcs = &fexit_funcs; |
691 | call->class->fields_array = fexit_fields_array; |
692 | } else { |
693 | call->event.funcs = &fentry_funcs; |
694 | call->class->fields_array = fentry_fields_array; |
695 | } |
696 | |
697 | call->flags = TRACE_EVENT_FL_FPROBE; |
698 | call->class->reg = fprobe_register; |
699 | } |
700 | |
701 | static int register_fprobe_event(struct trace_fprobe *tf) |
702 | { |
703 | init_trace_event_call(tf); |
704 | |
705 | return trace_probe_register_event_call(tp: &tf->tp); |
706 | } |
707 | |
708 | static int unregister_fprobe_event(struct trace_fprobe *tf) |
709 | { |
710 | return trace_probe_unregister_event_call(tp: &tf->tp); |
711 | } |
712 | |
713 | static int __regsiter_tracepoint_fprobe(struct trace_fprobe *tf) |
714 | { |
715 | struct tracepoint *tpoint = tf->tpoint; |
716 | unsigned long ip = (unsigned long)tpoint->probestub; |
717 | int ret; |
718 | |
719 | /* |
720 | * Here, we do 2 steps to enable fprobe on a tracepoint. |
721 | * At first, put __probestub_##TP function on the tracepoint |
722 | * and put a fprobe on the stub function. |
723 | */ |
724 | ret = tracepoint_probe_register_prio_may_exist(tp: tpoint, |
725 | probe: tpoint->probestub, NULL, prio: 0); |
726 | if (ret < 0) |
727 | return ret; |
728 | return register_fprobe_ips(fp: &tf->fp, addrs: &ip, num: 1); |
729 | } |
730 | |
731 | /* Internal register function - just handle fprobe and flags */ |
732 | static int __register_trace_fprobe(struct trace_fprobe *tf) |
733 | { |
734 | int i, ret; |
735 | |
736 | /* Should we need new LOCKDOWN flag for fprobe? */ |
737 | ret = security_locked_down(what: LOCKDOWN_KPROBES); |
738 | if (ret) |
739 | return ret; |
740 | |
741 | if (trace_fprobe_is_registered(tf)) |
742 | return -EINVAL; |
743 | |
744 | for (i = 0; i < tf->tp.nr_args; i++) { |
745 | ret = traceprobe_update_arg(arg: &tf->tp.args[i]); |
746 | if (ret) |
747 | return ret; |
748 | } |
749 | |
750 | /* Set/clear disabled flag according to tp->flag */ |
751 | if (trace_probe_is_enabled(tp: &tf->tp)) |
752 | tf->fp.flags &= ~FPROBE_FL_DISABLED; |
753 | else |
754 | tf->fp.flags |= FPROBE_FL_DISABLED; |
755 | |
756 | if (trace_fprobe_is_tracepoint(tf)) { |
757 | |
758 | /* This tracepoint is not loaded yet */ |
759 | if (tf->tpoint == TRACEPOINT_STUB) |
760 | return 0; |
761 | |
762 | return __regsiter_tracepoint_fprobe(tf); |
763 | } |
764 | |
765 | /* TODO: handle filter, nofilter or symbol list */ |
766 | return register_fprobe(fp: &tf->fp, filter: tf->symbol, NULL); |
767 | } |
768 | |
769 | /* Internal unregister function - just handle fprobe and flags */ |
770 | static void __unregister_trace_fprobe(struct trace_fprobe *tf) |
771 | { |
772 | if (trace_fprobe_is_registered(tf)) { |
773 | unregister_fprobe(fp: &tf->fp); |
774 | memset(&tf->fp, 0, sizeof(tf->fp)); |
775 | if (trace_fprobe_is_tracepoint(tf)) { |
776 | tracepoint_probe_unregister(tp: tf->tpoint, |
777 | probe: tf->tpoint->probestub, NULL); |
778 | tf->tpoint = NULL; |
779 | tf->mod = NULL; |
780 | } |
781 | } |
782 | } |
783 | |
784 | /* TODO: make this trace_*probe common function */ |
785 | /* Unregister a trace_probe and probe_event */ |
786 | static int unregister_trace_fprobe(struct trace_fprobe *tf) |
787 | { |
788 | /* If other probes are on the event, just unregister fprobe */ |
789 | if (trace_probe_has_sibling(tp: &tf->tp)) |
790 | goto unreg; |
791 | |
792 | /* Enabled event can not be unregistered */ |
793 | if (trace_probe_is_enabled(tp: &tf->tp)) |
794 | return -EBUSY; |
795 | |
796 | /* If there's a reference to the dynamic event */ |
797 | if (trace_event_dyn_busy(call: trace_probe_event_call(tp: &tf->tp))) |
798 | return -EBUSY; |
799 | |
800 | /* Will fail if probe is being used by ftrace or perf */ |
801 | if (unregister_fprobe_event(tf)) |
802 | return -EBUSY; |
803 | |
804 | unreg: |
805 | __unregister_trace_fprobe(tf); |
806 | dyn_event_remove(ev: &tf->devent); |
807 | trace_probe_unlink(tp: &tf->tp); |
808 | |
809 | return 0; |
810 | } |
811 | |
812 | static bool trace_fprobe_has_same_fprobe(struct trace_fprobe *orig, |
813 | struct trace_fprobe *comp) |
814 | { |
815 | struct trace_probe_event *tpe = orig->tp.event; |
816 | int i; |
817 | |
818 | list_for_each_entry(orig, &tpe->probes, tp.list) { |
819 | if (strcmp(trace_fprobe_symbol(tf: orig), |
820 | trace_fprobe_symbol(tf: comp))) |
821 | continue; |
822 | |
823 | /* |
824 | * trace_probe_compare_arg_type() ensured that nr_args and |
825 | * each argument name and type are same. Let's compare comm. |
826 | */ |
827 | for (i = 0; i < orig->tp.nr_args; i++) { |
828 | if (strcmp(orig->tp.args[i].comm, |
829 | comp->tp.args[i].comm)) |
830 | break; |
831 | } |
832 | |
833 | if (i == orig->tp.nr_args) |
834 | return true; |
835 | } |
836 | |
837 | return false; |
838 | } |
839 | |
840 | static int append_trace_fprobe(struct trace_fprobe *tf, struct trace_fprobe *to) |
841 | { |
842 | int ret; |
843 | |
844 | if (trace_fprobe_is_return(tf) != trace_fprobe_is_return(tf: to) || |
845 | trace_fprobe_is_tracepoint(tf) != trace_fprobe_is_tracepoint(tf: to)) { |
846 | trace_probe_log_set_index(index: 0); |
847 | trace_probe_log_err(0, DIFF_PROBE_TYPE); |
848 | return -EEXIST; |
849 | } |
850 | ret = trace_probe_compare_arg_type(a: &tf->tp, b: &to->tp); |
851 | if (ret) { |
852 | /* Note that argument starts index = 2 */ |
853 | trace_probe_log_set_index(index: ret + 1); |
854 | trace_probe_log_err(0, DIFF_ARG_TYPE); |
855 | return -EEXIST; |
856 | } |
857 | if (trace_fprobe_has_same_fprobe(orig: to, comp: tf)) { |
858 | trace_probe_log_set_index(index: 0); |
859 | trace_probe_log_err(0, SAME_PROBE); |
860 | return -EEXIST; |
861 | } |
862 | |
863 | /* Append to existing event */ |
864 | ret = trace_probe_append(tp: &tf->tp, to: &to->tp); |
865 | if (ret) |
866 | return ret; |
867 | |
868 | ret = __register_trace_fprobe(tf); |
869 | if (ret) |
870 | trace_probe_unlink(tp: &tf->tp); |
871 | else |
872 | dyn_event_add(ev: &tf->devent, call: trace_probe_event_call(tp: &tf->tp)); |
873 | |
874 | return ret; |
875 | } |
876 | |
877 | /* Register a trace_probe and probe_event */ |
878 | static int register_trace_fprobe(struct trace_fprobe *tf) |
879 | { |
880 | struct trace_fprobe *old_tf; |
881 | int ret; |
882 | |
883 | guard(mutex)(T: &event_mutex); |
884 | |
885 | old_tf = find_trace_fprobe(event: trace_probe_name(tp: &tf->tp), |
886 | group: trace_probe_group_name(tp: &tf->tp)); |
887 | if (old_tf) |
888 | return append_trace_fprobe(tf, to: old_tf); |
889 | |
890 | /* Register new event */ |
891 | ret = register_fprobe_event(tf); |
892 | if (ret) { |
893 | if (ret == -EEXIST) { |
894 | trace_probe_log_set_index(index: 0); |
895 | trace_probe_log_err(0, EVENT_EXIST); |
896 | } else |
897 | pr_warn("Failed to register probe event(%d)\n" , ret); |
898 | return ret; |
899 | } |
900 | |
901 | /* Register fprobe */ |
902 | ret = __register_trace_fprobe(tf); |
903 | if (ret < 0) |
904 | unregister_fprobe_event(tf); |
905 | else |
906 | dyn_event_add(ev: &tf->devent, call: trace_probe_event_call(tp: &tf->tp)); |
907 | |
908 | return ret; |
909 | } |
910 | |
/* Shared context for the tracepoint-lookup callbacks below. */
struct __find_tracepoint_cb_data {
	const char *tp_name;		/* name to search for */
	struct tracepoint *tpoint;	/* result: first matching tracepoint */
	struct module *mod;		/* in: target module, or NULL; out: module with refcount held */
};
916 | |
917 | static void __find_tracepoint_module_cb(struct tracepoint *tp, struct module *mod, void *priv) |
918 | { |
919 | struct __find_tracepoint_cb_data *data = priv; |
920 | |
921 | if (!data->tpoint && !strcmp(data->tp_name, tp->name)) { |
922 | /* If module is not specified, try getting module refcount. */ |
923 | if (!data->mod && mod) { |
924 | /* If failed to get refcount, ignore this tracepoint. */ |
925 | if (!try_module_get(module: mod)) |
926 | return; |
927 | |
928 | data->mod = mod; |
929 | } |
930 | data->tpoint = tp; |
931 | } |
932 | } |
933 | |
/* Built-in (kernel) tracepoint lookup callback: record first name match. */
static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
{
	struct __find_tracepoint_cb_data *data = priv;

	if (!data->tpoint && !strcmp(data->tp_name, tp->name))
		data->tpoint = tp;
}
941 | |
942 | /* |
943 | * Find a tracepoint from kernel and module. If the tracepoint is on the module, |
944 | * the module's refcount is incremented and returned as *@tp_mod. Thus, if it is |
945 | * not NULL, caller must call module_put(*tp_mod) after used the tracepoint. |
946 | */ |
947 | static struct tracepoint *find_tracepoint(const char *tp_name, |
948 | struct module **tp_mod) |
949 | { |
950 | struct __find_tracepoint_cb_data data = { |
951 | .tp_name = tp_name, |
952 | .mod = NULL, |
953 | }; |
954 | |
955 | for_each_kernel_tracepoint(fct: __find_tracepoint_cb, priv: &data); |
956 | |
957 | if (!data.tpoint && IS_ENABLED(CONFIG_MODULES)) { |
958 | for_each_module_tracepoint(fct: __find_tracepoint_module_cb, priv: &data); |
959 | *tp_mod = data.mod; |
960 | } |
961 | |
962 | return data.tpoint; |
963 | } |
964 | |
965 | #ifdef CONFIG_MODULES |
/* Re-enable every fprobe sharing @tf's trace_probe event. */
static void reenable_trace_fprobe(struct trace_fprobe *tf)
{
	struct trace_probe *tp = &tf->tp;

	list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) {
		__enable_trace_fprobe(tf);
	}
}
974 | |
975 | /* |
976 | * Find a tracepoint from specified module. In this case, this does not get the |
977 | * module's refcount. The caller must ensure the module is not freed. |
978 | */ |
979 | static struct tracepoint *find_tracepoint_in_module(struct module *mod, |
980 | const char *tp_name) |
981 | { |
982 | struct __find_tracepoint_cb_data data = { |
983 | .tp_name = tp_name, |
984 | .mod = mod, |
985 | }; |
986 | |
987 | for_each_tracepoint_in_module(mod, fct: __find_tracepoint_module_cb, priv: &data); |
988 | return data.tpoint; |
989 | } |
990 | |
991 | static int __tracepoint_probe_module_cb(struct notifier_block *self, |
992 | unsigned long val, void *data) |
993 | { |
994 | struct tp_module *tp_mod = data; |
995 | struct tracepoint *tpoint; |
996 | struct trace_fprobe *tf; |
997 | struct dyn_event *pos; |
998 | |
999 | if (val != MODULE_STATE_GOING && val != MODULE_STATE_COMING) |
1000 | return NOTIFY_DONE; |
1001 | |
1002 | mutex_lock(&event_mutex); |
1003 | for_each_trace_fprobe(tf, pos) { |
1004 | if (val == MODULE_STATE_COMING && tf->tpoint == TRACEPOINT_STUB) { |
1005 | tpoint = find_tracepoint_in_module(mod: tp_mod->mod, tp_name: tf->symbol); |
1006 | if (tpoint) { |
1007 | tf->tpoint = tpoint; |
1008 | tf->mod = tp_mod->mod; |
1009 | if (!WARN_ON_ONCE(__regsiter_tracepoint_fprobe(tf)) && |
1010 | trace_probe_is_enabled(tp: &tf->tp)) |
1011 | reenable_trace_fprobe(tf); |
1012 | } |
1013 | } else if (val == MODULE_STATE_GOING && tp_mod->mod == tf->mod) { |
1014 | unregister_fprobe(fp: &tf->fp); |
1015 | if (trace_fprobe_is_tracepoint(tf)) { |
1016 | tracepoint_probe_unregister(tp: tf->tpoint, |
1017 | probe: tf->tpoint->probestub, NULL); |
1018 | tf->tpoint = TRACEPOINT_STUB; |
1019 | tf->mod = NULL; |
1020 | } |
1021 | } |
1022 | } |
1023 | mutex_unlock(lock: &event_mutex); |
1024 | |
1025 | return NOTIFY_DONE; |
1026 | } |
1027 | |
/* Watches module load/unload to (un)bind tracepoint-based fprobe events. */
static struct notifier_block tracepoint_module_nb = {
	.notifier_call = __tracepoint_probe_module_cb,
};
1031 | #endif /* CONFIG_MODULES */ |
1032 | |
1033 | static int parse_symbol_and_return(int argc, const char *argv[], |
1034 | char **symbol, bool *is_return, |
1035 | bool is_tracepoint) |
1036 | { |
1037 | char *tmp = strchr(argv[1], '%'); |
1038 | int i; |
1039 | |
1040 | if (tmp) { |
1041 | int len = tmp - argv[1]; |
1042 | |
1043 | if (!is_tracepoint && !strcmp(tmp, "%return" )) { |
1044 | *is_return = true; |
1045 | } else { |
1046 | trace_probe_log_err(len, BAD_ADDR_SUFFIX); |
1047 | return -EINVAL; |
1048 | } |
1049 | *symbol = kmemdup_nul(s: argv[1], len, GFP_KERNEL); |
1050 | } else |
1051 | *symbol = kstrdup(s: argv[1], GFP_KERNEL); |
1052 | if (!*symbol) |
1053 | return -ENOMEM; |
1054 | |
1055 | if (*is_return) |
1056 | return 0; |
1057 | |
1058 | if (is_tracepoint) { |
1059 | tmp = *symbol; |
1060 | while (*tmp && (isalnum(*tmp) || *tmp == '_')) |
1061 | tmp++; |
1062 | if (*tmp) { |
1063 | /* find a wrong character. */ |
1064 | trace_probe_log_err(tmp - *symbol, BAD_TP_NAME); |
1065 | kfree(objp: *symbol); |
1066 | *symbol = NULL; |
1067 | return -EINVAL; |
1068 | } |
1069 | } |
1070 | |
1071 | /* If there is $retval, this should be a return fprobe. */ |
1072 | for (i = 2; i < argc; i++) { |
1073 | tmp = strstr(argv[i], "$retval" ); |
1074 | if (tmp && !isalnum(tmp[7]) && tmp[7] != '_') { |
1075 | if (is_tracepoint) { |
1076 | trace_probe_log_set_index(index: i); |
1077 | trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE); |
1078 | kfree(objp: *symbol); |
1079 | *symbol = NULL; |
1080 | return -EINVAL; |
1081 | } |
1082 | *is_return = true; |
1083 | break; |
1084 | } |
1085 | } |
1086 | return 0; |
1087 | } |
1088 | |
/* Scoped-cleanup helper: a __free(module_put) pointer drops its module ref on scope exit. */
DEFINE_FREE(module_put, struct module *, if (_T) module_put(_T))
1090 | |
1091 | static int trace_fprobe_create_internal(int argc, const char *argv[], |
1092 | struct traceprobe_parse_context *ctx) |
1093 | { |
1094 | /* |
1095 | * Argument syntax: |
1096 | * - Add fentry probe: |
1097 | * f[:[GRP/][EVENT]] [MOD:]KSYM [FETCHARGS] |
1098 | * - Add fexit probe: |
1099 | * f[N][:[GRP/][EVENT]] [MOD:]KSYM%return [FETCHARGS] |
1100 | * - Add tracepoint probe: |
1101 | * t[:[GRP/][EVENT]] TRACEPOINT [FETCHARGS] |
1102 | * |
1103 | * Fetch args: |
1104 | * $retval : fetch return value |
1105 | * $stack : fetch stack address |
1106 | * $stackN : fetch Nth entry of stack (N:0-) |
1107 | * $argN : fetch Nth argument (N:1-) |
1108 | * $comm : fetch current task comm |
1109 | * @ADDR : fetch memory at ADDR (ADDR should be in kernel) |
1110 | * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol) |
1111 | * Dereferencing memory fetch: |
1112 | * +|-offs(ARG) : fetch memory at ARG +|- offs address. |
1113 | * Alias name of args: |
1114 | * NAME=FETCHARG : set NAME as alias of FETCHARG. |
1115 | * Type of args: |
1116 | * FETCHARG:TYPE : use TYPE instead of unsigned long. |
1117 | */ |
1118 | struct trace_fprobe *tf __free(free_trace_fprobe) = NULL; |
1119 | int i, new_argc = 0, ret = 0; |
1120 | bool is_return = false; |
1121 | char *symbol __free(kfree) = NULL; |
1122 | const char *event = NULL, *group = FPROBE_EVENT_SYSTEM; |
1123 | const char **new_argv __free(kfree) = NULL; |
1124 | char buf[MAX_EVENT_NAME_LEN]; |
1125 | char gbuf[MAX_EVENT_NAME_LEN]; |
1126 | char sbuf[KSYM_NAME_LEN]; |
1127 | char abuf[MAX_BTF_ARGS_LEN]; |
1128 | char *dbuf __free(kfree) = NULL; |
1129 | bool is_tracepoint = false; |
1130 | struct module *tp_mod __free(module_put) = NULL; |
1131 | struct tracepoint *tpoint = NULL; |
1132 | |
1133 | if ((argv[0][0] != 'f' && argv[0][0] != 't') || argc < 2) |
1134 | return -ECANCELED; |
1135 | |
1136 | if (argv[0][0] == 't') { |
1137 | is_tracepoint = true; |
1138 | group = TRACEPOINT_EVENT_SYSTEM; |
1139 | } |
1140 | |
1141 | if (argv[0][1] != '\0') { |
1142 | if (argv[0][1] != ':') { |
1143 | trace_probe_log_set_index(index: 0); |
1144 | trace_probe_log_err(1, BAD_MAXACT); |
1145 | return -EINVAL; |
1146 | } |
1147 | event = &argv[0][2]; |
1148 | } |
1149 | |
1150 | trace_probe_log_set_index(index: 1); |
1151 | |
1152 | /* a symbol(or tracepoint) must be specified */ |
1153 | ret = parse_symbol_and_return(argc, argv, symbol: &symbol, is_return: &is_return, is_tracepoint); |
1154 | if (ret < 0) |
1155 | return -EINVAL; |
1156 | |
1157 | trace_probe_log_set_index(index: 0); |
1158 | if (event) { |
1159 | ret = traceprobe_parse_event_name(pevent: &event, pgroup: &group, buf: gbuf, |
1160 | offset: event - argv[0]); |
1161 | if (ret) |
1162 | return -EINVAL; |
1163 | } |
1164 | |
1165 | if (!event) { |
1166 | /* Make a new event name */ |
1167 | if (is_tracepoint) |
1168 | snprintf(buf, MAX_EVENT_NAME_LEN, fmt: "%s%s" , |
1169 | isdigit(c: *symbol) ? "_" : "" , symbol); |
1170 | else |
1171 | snprintf(buf, MAX_EVENT_NAME_LEN, fmt: "%s__%s" , symbol, |
1172 | is_return ? "exit" : "entry" ); |
1173 | sanitize_event_name(name: buf); |
1174 | event = buf; |
1175 | } |
1176 | |
1177 | if (is_return) |
1178 | ctx->flags |= TPARG_FL_RETURN; |
1179 | else |
1180 | ctx->flags |= TPARG_FL_FENTRY; |
1181 | |
1182 | if (is_tracepoint) { |
1183 | ctx->flags |= TPARG_FL_TPOINT; |
1184 | tpoint = find_tracepoint(tp_name: symbol, tp_mod: &tp_mod); |
1185 | if (tpoint) { |
1186 | ctx->funcname = kallsyms_lookup( |
1187 | addr: (unsigned long)tpoint->probestub, |
1188 | NULL, NULL, NULL, namebuf: sbuf); |
1189 | } else if (IS_ENABLED(CONFIG_MODULES)) { |
1190 | /* This *may* be loaded afterwards */ |
1191 | tpoint = TRACEPOINT_STUB; |
1192 | ctx->funcname = symbol; |
1193 | } else { |
1194 | trace_probe_log_set_index(index: 1); |
1195 | trace_probe_log_err(0, NO_TRACEPOINT); |
1196 | return -EINVAL; |
1197 | } |
1198 | } else |
1199 | ctx->funcname = symbol; |
1200 | |
1201 | argc -= 2; argv += 2; |
1202 | new_argv = traceprobe_expand_meta_args(argc, argv, new_argc: &new_argc, |
1203 | buf: abuf, MAX_BTF_ARGS_LEN, ctx); |
1204 | if (IS_ERR(ptr: new_argv)) |
1205 | return PTR_ERR(ptr: new_argv); |
1206 | if (new_argv) { |
1207 | argc = new_argc; |
1208 | argv = new_argv; |
1209 | } |
1210 | if (argc > MAX_TRACE_ARGS) { |
1211 | trace_probe_log_set_index(index: 2); |
1212 | trace_probe_log_err(0, TOO_MANY_ARGS); |
1213 | return -E2BIG; |
1214 | } |
1215 | |
1216 | ret = traceprobe_expand_dentry_args(argc, argv, buf: &dbuf); |
1217 | if (ret) |
1218 | return ret; |
1219 | |
1220 | /* setup a probe */ |
1221 | tf = alloc_trace_fprobe(group, event, symbol, tpoint, mod: tp_mod, |
1222 | nargs: argc, is_return); |
1223 | if (IS_ERR(ptr: tf)) { |
1224 | ret = PTR_ERR(ptr: tf); |
1225 | /* This must return -ENOMEM, else there is a bug */ |
1226 | WARN_ON_ONCE(ret != -ENOMEM); |
1227 | return ret; |
1228 | } |
1229 | |
1230 | /* parse arguments */ |
1231 | for (i = 0; i < argc; i++) { |
1232 | trace_probe_log_set_index(index: i + 2); |
1233 | ctx->offset = 0; |
1234 | ret = traceprobe_parse_probe_arg(tp: &tf->tp, i, argv: argv[i], ctx); |
1235 | if (ret) |
1236 | return ret; /* This can be -ENOMEM */ |
1237 | } |
1238 | |
1239 | if (is_return && tf->tp.entry_arg) { |
1240 | tf->fp.entry_handler = trace_fprobe_entry_handler; |
1241 | tf->fp.entry_data_size = traceprobe_get_entry_data_size(tp: &tf->tp); |
1242 | if (ALIGN(tf->fp.entry_data_size, sizeof(long)) > MAX_FPROBE_DATA_SIZE) { |
1243 | trace_probe_log_set_index(index: 2); |
1244 | trace_probe_log_err(0, TOO_MANY_EARGS); |
1245 | return -E2BIG; |
1246 | } |
1247 | } |
1248 | |
1249 | ret = traceprobe_set_print_fmt(tp: &tf->tp, |
1250 | ptype: is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL); |
1251 | if (ret < 0) |
1252 | return ret; |
1253 | |
1254 | ret = register_trace_fprobe(tf); |
1255 | if (ret) { |
1256 | trace_probe_log_set_index(index: 1); |
1257 | if (ret == -EILSEQ) |
1258 | trace_probe_log_err(0, BAD_INSN_BNDRY); |
1259 | else if (ret == -ENOENT) |
1260 | trace_probe_log_err(0, BAD_PROBE_ADDR); |
1261 | else if (ret != -ENOMEM && ret != -EEXIST) |
1262 | trace_probe_log_err(0, FAIL_REG_PROBE); |
1263 | return -EINVAL; |
1264 | } |
1265 | |
1266 | /* 'tf' is successfully registered. To avoid freeing, assign NULL. */ |
1267 | tf = NULL; |
1268 | |
1269 | return 0; |
1270 | } |
1271 | |
1272 | static int trace_fprobe_create_cb(int argc, const char *argv[]) |
1273 | { |
1274 | struct traceprobe_parse_context ctx = { |
1275 | .flags = TPARG_FL_KERNEL | TPARG_FL_FPROBE, |
1276 | }; |
1277 | int ret; |
1278 | |
1279 | trace_probe_log_init(subsystem: "trace_fprobe" , argc, argv); |
1280 | ret = trace_fprobe_create_internal(argc, argv, ctx: &ctx); |
1281 | traceprobe_finish_parse(ctx: &ctx); |
1282 | trace_probe_log_clear(); |
1283 | return ret; |
1284 | } |
1285 | |
1286 | static int trace_fprobe_create(const char *raw_command) |
1287 | { |
1288 | return trace_probe_create(raw_command, createfn: trace_fprobe_create_cb); |
1289 | } |
1290 | |
/*
 * dyn_event .free handler: unregister the event and, only when that
 * succeeds, release its memory.
 */
static int trace_fprobe_release(struct dyn_event *ev)
{
	struct trace_fprobe *tf = to_trace_fprobe(ev);
	int ret;

	ret = unregister_trace_fprobe(tf);
	if (ret == 0)
		free_trace_fprobe(tf);

	return ret;
}
1300 | |
1301 | static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev) |
1302 | { |
1303 | struct trace_fprobe *tf = to_trace_fprobe(ev); |
1304 | int i; |
1305 | |
1306 | if (trace_fprobe_is_tracepoint(tf)) |
1307 | seq_putc(m, c: 't'); |
1308 | else |
1309 | seq_putc(m, c: 'f'); |
1310 | seq_printf(m, fmt: ":%s/%s" , trace_probe_group_name(tp: &tf->tp), |
1311 | trace_probe_name(tp: &tf->tp)); |
1312 | |
1313 | seq_printf(m, fmt: " %s%s" , trace_fprobe_symbol(tf), |
1314 | trace_fprobe_is_return(tf) ? "%return" : "" ); |
1315 | |
1316 | for (i = 0; i < tf->tp.nr_args; i++) |
1317 | seq_printf(m, fmt: " %s=%s" , tf->tp.args[i].name, tf->tp.args[i].comm); |
1318 | seq_putc(m, c: '\n'); |
1319 | |
1320 | return 0; |
1321 | } |
1322 | |
1323 | /* |
1324 | * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex. |
1325 | */ |
1326 | static int fprobe_register(struct trace_event_call *event, |
1327 | enum trace_reg type, void *data) |
1328 | { |
1329 | struct trace_event_file *file = data; |
1330 | |
1331 | switch (type) { |
1332 | case TRACE_REG_REGISTER: |
1333 | return enable_trace_fprobe(call: event, file); |
1334 | case TRACE_REG_UNREGISTER: |
1335 | return disable_trace_fprobe(call: event, file); |
1336 | |
1337 | #ifdef CONFIG_PERF_EVENTS |
1338 | case TRACE_REG_PERF_REGISTER: |
1339 | return enable_trace_fprobe(call: event, NULL); |
1340 | case TRACE_REG_PERF_UNREGISTER: |
1341 | return disable_trace_fprobe(call: event, NULL); |
1342 | case TRACE_REG_PERF_OPEN: |
1343 | case TRACE_REG_PERF_CLOSE: |
1344 | case TRACE_REG_PERF_ADD: |
1345 | case TRACE_REG_PERF_DEL: |
1346 | return 0; |
1347 | #endif |
1348 | } |
1349 | return 0; |
1350 | } |
1351 | |
1352 | /* |
1353 | * Register dynevent at core_initcall. This allows kernel to setup fprobe |
1354 | * events in postcore_initcall without tracefs. |
1355 | */ |
1356 | static __init int init_fprobe_trace_early(void) |
1357 | { |
1358 | int ret; |
1359 | |
1360 | ret = dyn_event_register(ops: &trace_fprobe_ops); |
1361 | if (ret) |
1362 | return ret; |
1363 | |
1364 | #ifdef CONFIG_MODULES |
1365 | ret = register_tracepoint_module_notifier(nb: &tracepoint_module_nb); |
1366 | if (ret) |
1367 | return ret; |
1368 | #endif |
1369 | |
1370 | return 0; |
1371 | } |
1372 | core_initcall(init_fprobe_trace_early); |
1373 | |