1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Infrastructure for profiling code inserted by 'gcc -pg'. |
4 | * |
5 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> |
6 | * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com> |
7 | * |
8 | * Originally ported from the -rt patch by: |
9 | * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> |
10 | * |
11 | * Based on code in the latency_tracer, that is: |
12 | * |
13 | * Copyright (C) 2004-2006 Ingo Molnar |
14 | * Copyright (C) 2004 Nadia Yvette Chambers |
15 | */ |
16 | |
17 | #include <linux/stop_machine.h> |
18 | #include <linux/clocksource.h> |
19 | #include <linux/sched/task.h> |
20 | #include <linux/kallsyms.h> |
21 | #include <linux/security.h> |
22 | #include <linux/seq_file.h> |
23 | #include <linux/tracefs.h> |
24 | #include <linux/hardirq.h> |
25 | #include <linux/kthread.h> |
26 | #include <linux/uaccess.h> |
27 | #include <linux/bsearch.h> |
28 | #include <linux/module.h> |
29 | #include <linux/ftrace.h> |
30 | #include <linux/sysctl.h> |
31 | #include <linux/slab.h> |
32 | #include <linux/ctype.h> |
33 | #include <linux/sort.h> |
34 | #include <linux/list.h> |
35 | #include <linux/hash.h> |
36 | #include <linux/rcupdate.h> |
37 | #include <linux/kprobes.h> |
38 | |
39 | #include <trace/events/sched.h> |
40 | |
41 | #include <asm/sections.h> |
42 | #include <asm/setup.h> |
43 | |
44 | #include "ftrace_internal.h" |
45 | #include "trace_output.h" |
46 | #include "trace_stat.h" |
47 | |
48 | /* Flags that do not get reset */ |
49 | #define FTRACE_NOCLEAR_FLAGS (FTRACE_FL_DISABLED | FTRACE_FL_TOUCHED | \ |
50 | FTRACE_FL_MODIFIED) |
51 | |
52 | #define FTRACE_INVALID_FUNCTION "__ftrace_invalid_address__" |
53 | |
54 | #define FTRACE_WARN_ON(cond) \ |
55 | ({ \ |
56 | int ___r = cond; \ |
57 | if (WARN_ON(___r)) \ |
58 | ftrace_kill(); \ |
59 | ___r; \ |
60 | }) |
61 | |
62 | #define FTRACE_WARN_ON_ONCE(cond) \ |
63 | ({ \ |
64 | int ___r = cond; \ |
65 | if (WARN_ON_ONCE(___r)) \ |
66 | ftrace_kill(); \ |
67 | ___r; \ |
68 | }) |
69 | |
70 | /* hash bits for specific function selection */ |
71 | #define FTRACE_HASH_DEFAULT_BITS 10 |
72 | #define FTRACE_HASH_MAX_BITS 12 |
73 | |
74 | #ifdef CONFIG_DYNAMIC_FTRACE |
75 | #define INIT_OPS_HASH(opsname) \ |
76 | .func_hash = &opsname.local_hash, \ |
77 | .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), |
78 | #else |
79 | #define INIT_OPS_HASH(opsname) |
80 | #endif |
81 | |
82 | enum { |
83 | FTRACE_MODIFY_ENABLE_FL = (1 << 0), |
84 | FTRACE_MODIFY_MAY_SLEEP_FL = (1 << 1), |
85 | }; |
86 | |
87 | struct ftrace_ops ftrace_list_end __read_mostly = { |
88 | .func = ftrace_stub, |
89 | .flags = FTRACE_OPS_FL_STUB, |
90 | INIT_OPS_HASH(ftrace_list_end) |
91 | }; |
92 | |
93 | /* ftrace_enabled is a method to turn ftrace on or off */ |
94 | int ftrace_enabled __read_mostly; |
95 | static int __maybe_unused last_ftrace_enabled; |
96 | |
97 | /* Current function tracing op */ |
98 | struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end; |
99 | /* What to set function_trace_op to */ |
100 | static struct ftrace_ops *set_function_trace_op; |
101 | |
102 | static bool ftrace_pids_enabled(struct ftrace_ops *ops) |
103 | { |
104 | struct trace_array *tr; |
105 | |
106 | if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private) |
107 | return false; |
108 | |
109 | tr = ops->private; |
110 | |
111 | return tr->function_pids != NULL || tr->function_no_pids != NULL; |
112 | } |
113 | |
114 | static void ftrace_update_trampoline(struct ftrace_ops *ops); |
115 | |
116 | /* |
117 | * ftrace_disabled is set when an anomaly is discovered. |
118 | * ftrace_disabled is much stronger than ftrace_enabled. |
119 | */ |
120 | static int ftrace_disabled __read_mostly; |
121 | |
122 | DEFINE_MUTEX(ftrace_lock); |
123 | |
124 | struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end; |
125 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; |
126 | struct ftrace_ops global_ops; |
127 | |
128 | /* Defined by vmlinux.lds.h; see the comment above arch_ftrace_ops_list_func for details */ |
129 | void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
130 | struct ftrace_ops *op, struct ftrace_regs *fregs); |
131 | |
132 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS |
133 | /* |
134 | * Stub used to invoke the list ops without requiring a separate trampoline. |
135 | */ |
136 | const struct ftrace_ops ftrace_list_ops = { |
137 | .func = ftrace_ops_list_func, |
138 | .flags = FTRACE_OPS_FL_STUB, |
139 | }; |
140 | |
141 | static void ftrace_ops_nop_func(unsigned long ip, unsigned long parent_ip, |
142 | struct ftrace_ops *op, |
143 | struct ftrace_regs *fregs) |
144 | { |
145 | /* do nothing */ |
146 | } |
147 | |
148 | /* |
149 | * Stub used when a call site is disabled. May be called transiently by threads |
150 | * which have made it into ftrace_caller but haven't yet recovered the ops at |
151 | * the point the call site is disabled. |
152 | */ |
153 | const struct ftrace_ops ftrace_nop_ops = { |
154 | .func = ftrace_ops_nop_func, |
155 | .flags = FTRACE_OPS_FL_STUB, |
156 | }; |
157 | #endif |
158 | |
159 | static inline void ftrace_ops_init(struct ftrace_ops *ops) |
160 | { |
161 | #ifdef CONFIG_DYNAMIC_FTRACE |
162 | if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { |
163 | mutex_init(&ops->local_hash.regex_lock); |
164 | ops->func_hash = &ops->local_hash; |
165 | ops->flags |= FTRACE_OPS_FL_INITIALIZED; |
166 | } |
167 | #endif |
168 | } |
169 | |
170 | static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, |
171 | struct ftrace_ops *op, struct ftrace_regs *fregs) |
172 | { |
173 | struct trace_array *tr = op->private; |
174 | int pid; |
175 | |
176 | if (tr) { |
177 | pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid); |
178 | if (pid == FTRACE_PID_IGNORE) |
179 | return; |
180 | if (pid != FTRACE_PID_TRACE && |
181 | pid != current->pid) |
182 | return; |
183 | } |
184 | |
185 | op->saved_func(ip, parent_ip, op, fregs); |
186 | } |
187 | |
188 | static void ftrace_sync_ipi(void *data) |
189 | { |
190 | /* Probably not needed, but do it anyway */ |
191 | smp_rmb(); |
192 | } |
193 | |
194 | static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops) |
195 | { |
196 | /* |
197 | * If this is a dynamic or RCU ops, or we force list func, |
198 | * then it needs to call the list anyway. |
199 | */ |
200 | if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) || |
201 | FTRACE_FORCE_LIST_FUNC) |
202 | return ftrace_ops_list_func; |
203 | |
204 | return ftrace_ops_get_func(ops); |
205 | } |
206 | |
207 | static void update_ftrace_function(void) |
208 | { |
209 | ftrace_func_t func; |
210 | |
211 | /* |
212 | * Prepare the ftrace_ops that the arch callback will use. |
213 | * If there's only one ftrace_ops registered, the ftrace_ops_list |
214 | * will point to the ops we want. |
215 | */ |
216 | set_function_trace_op = rcu_dereference_protected(ftrace_ops_list, |
217 | lockdep_is_held(&ftrace_lock)); |
218 | |
219 | /* If there's no ftrace_ops registered, just call the stub function */ |
220 | if (set_function_trace_op == &ftrace_list_end) { |
221 | func = ftrace_stub; |
222 | |
223 | /* |
224 | * If we are at the end of the list and this ops is |
225 | * recursion safe and not dynamic and the arch supports passing ops, |
226 | * then have the mcount trampoline call the function directly. |
227 | */ |
228 | } else if (rcu_dereference_protected(ftrace_ops_list->next, |
229 | lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { |
230 | func = ftrace_ops_get_list_func(ftrace_ops_list); |
231 | |
232 | } else { |
233 | /* Just use the default ftrace_ops */ |
234 | set_function_trace_op = &ftrace_list_end; |
235 | func = ftrace_ops_list_func; |
236 | } |
237 | |
238 | update_function_graph_func(); |
239 | |
240 | /* If there's no change, then do nothing more here */ |
241 | if (ftrace_trace_function == func) |
242 | return; |
243 | |
244 | /* |
245 | * If we are using the list function, it doesn't care |
246 | * about the function_trace_ops. |
247 | */ |
248 | if (func == ftrace_ops_list_func) { |
249 | ftrace_trace_function = func; |
250 | /* |
251 | * Don't even bother setting function_trace_ops, |
252 | * it would be racy to do so anyway. |
253 | */ |
254 | return; |
255 | } |
256 | |
257 | #ifndef CONFIG_DYNAMIC_FTRACE |
258 | /* |
259 | * For static tracing, we need to be a bit more careful. |
260 | * The function change takes effect immediately. Thus, |
261 | * we need to coordinate the setting of the function_trace_ops |
262 | * with the setting of the ftrace_trace_function. |
263 | * |
264 | * Set the function to the list ops, which will call the |
265 | * function we want, albeit indirectly, but it handles the |
266 | * ftrace_ops and doesn't depend on function_trace_op. |
267 | */ |
268 | ftrace_trace_function = ftrace_ops_list_func; |
269 | /* |
270 | * Make sure all CPUs see this. Yes this is slow, but static |
271 | * tracing is slow and nasty to have enabled. |
272 | */ |
273 | synchronize_rcu_tasks_rude(); |
274 | /* Now all cpus are using the list ops. */ |
275 | function_trace_op = set_function_trace_op; |
276 | /* Make sure the function_trace_op is visible on all CPUs */ |
277 | smp_wmb(); |
278 | /* Nasty way to force a rmb on all cpus */ |
279 | smp_call_function(ftrace_sync_ipi, NULL, 1); |
280 | /* OK, we are all set to update the ftrace_trace_function now! */ |
281 | #endif /* !CONFIG_DYNAMIC_FTRACE */ |
282 | |
283 | ftrace_trace_function = func; |
284 | } |
285 | |
286 | static void add_ftrace_ops(struct ftrace_ops __rcu **list, |
287 | struct ftrace_ops *ops) |
288 | { |
289 | rcu_assign_pointer(ops->next, *list); |
290 | |
291 | /* |
292 | * We are entering ops into the list but another |
293 | * CPU might be walking that list. We need to make sure |
294 | * the ops->next pointer is valid before another CPU sees |
295 | * the ops pointer included into the list. |
296 | */ |
297 | rcu_assign_pointer(*list, ops); |
298 | } |
299 | |
300 | static int remove_ftrace_ops(struct ftrace_ops __rcu **list, |
301 | struct ftrace_ops *ops) |
302 | { |
303 | struct ftrace_ops **p; |
304 | |
305 | /* |
306 | * If we are removing the last function, then simply point |
307 | * to the ftrace_stub. |
308 | */ |
309 | if (rcu_dereference_protected(*list, |
310 | lockdep_is_held(&ftrace_lock)) == ops && |
311 | rcu_dereference_protected(ops->next, |
312 | lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { |
313 | *list = &ftrace_list_end; |
314 | return 0; |
315 | } |
316 | |
317 | for (p = list; *p != &ftrace_list_end; p = &(*p)->next) |
318 | if (*p == ops) |
319 | break; |
320 | |
321 | if (*p != ops) |
322 | return -1; |
323 | |
324 | *p = (*p)->next; |
325 | return 0; |
326 | } |
327 | |
328 | static void ftrace_update_trampoline(struct ftrace_ops *ops); |
329 | |
330 | int __register_ftrace_function(struct ftrace_ops *ops) |
331 | { |
332 | if (ops->flags & FTRACE_OPS_FL_DELETED) |
333 | return -EINVAL; |
334 | |
335 | if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED)) |
336 | return -EBUSY; |
337 | |
338 | #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS |
339 | /* |
340 | * If the ftrace_ops specifies SAVE_REGS, then it only can be used |
341 | * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set. |
342 | * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant. |
343 | */ |
344 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS && |
345 | !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)) |
346 | return -EINVAL; |
347 | |
348 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED) |
349 | ops->flags |= FTRACE_OPS_FL_SAVE_REGS; |
350 | #endif |
351 | if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT)) |
352 | return -EBUSY; |
353 | |
354 | if (!is_kernel_core_data((unsigned long)ops)) |
355 | ops->flags |= FTRACE_OPS_FL_DYNAMIC; |
356 | |
357 | add_ftrace_ops(&ftrace_ops_list, ops); |
358 | |
359 | /* Always save the function, and reset at unregistering */ |
360 | ops->saved_func = ops->func; |
361 | |
362 | if (ftrace_pids_enabled(ops)) |
363 | ops->func = ftrace_pid_func; |
364 | |
365 | ftrace_update_trampoline(ops); |
366 | |
367 | if (ftrace_enabled) |
368 | update_ftrace_function(); |
369 | |
370 | return 0; |
371 | } |
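/*
 * Illustrative sketch (not part of the original file): callers normally go
 * through the public register_ftrace_function() wrapper, which takes
 * ftrace_lock and ends up here. A minimal user, with a made-up callback and
 * filter target, looks roughly like:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		// called for every function the ops' filter hash allows
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	ftrace_set_filter(&my_ops, "do_sys_openat2", strlen("do_sys_openat2"), 0);
 *	register_ftrace_function(&my_ops);
 *
 * The exact callback signature and available flags vary between kernel
 * versions.
 */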
372 | |
373 | int __unregister_ftrace_function(struct ftrace_ops *ops) |
374 | { |
375 | int ret; |
376 | |
377 | if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) |
378 | return -EBUSY; |
379 | |
380 | ret = remove_ftrace_ops(&ftrace_ops_list, ops); |
381 | |
382 | if (ret < 0) |
383 | return ret; |
384 | |
385 | if (ftrace_enabled) |
386 | update_ftrace_function(); |
387 | |
388 | ops->func = ops->saved_func; |
389 | |
390 | return 0; |
391 | } |
392 | |
393 | static void ftrace_update_pid_func(void) |
394 | { |
395 | struct ftrace_ops *op; |
396 | |
397 | /* Only do something if we are tracing something */ |
398 | if (ftrace_trace_function == ftrace_stub) |
399 | return; |
400 | |
401 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
402 | if (op->flags & FTRACE_OPS_FL_PID) { |
403 | op->func = ftrace_pids_enabled(op) ? |
404 | ftrace_pid_func : op->saved_func; |
405 | ftrace_update_trampoline(op); |
406 | } |
407 | } while_for_each_ftrace_op(op); |
408 | |
409 | update_ftrace_function(); |
410 | } |
411 | |
412 | #ifdef CONFIG_FUNCTION_PROFILER |
413 | struct ftrace_profile { |
414 | struct hlist_node node; |
415 | unsigned long ip; |
416 | unsigned long counter; |
417 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
418 | unsigned long long time; |
419 | unsigned long long time_squared; |
420 | #endif |
421 | }; |
422 | |
423 | struct ftrace_profile_page { |
424 | struct ftrace_profile_page *next; |
425 | unsigned long index; |
426 | struct ftrace_profile records[]; |
427 | }; |
428 | |
429 | struct ftrace_profile_stat { |
430 | atomic_t disabled; |
431 | struct hlist_head *hash; |
432 | struct ftrace_profile_page *pages; |
433 | struct ftrace_profile_page *start; |
434 | struct tracer_stat stat; |
435 | }; |
436 | |
437 | #define PROFILE_RECORDS_SIZE \ |
438 | (PAGE_SIZE - offsetof(struct ftrace_profile_page, records)) |
439 | |
440 | #define PROFILES_PER_PAGE \ |
441 | (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile)) |
442 | |
443 | static int ftrace_profile_enabled __read_mostly; |
444 | |
445 | /* ftrace_profile_lock - synchronize the enable and disable of the profiler */ |
446 | static DEFINE_MUTEX(ftrace_profile_lock); |
447 | |
448 | static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats); |
449 | |
450 | #define FTRACE_PROFILE_HASH_BITS 10 |
451 | #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS) |
452 | |
453 | static void * |
454 | function_stat_next(void *v, int idx) |
455 | { |
456 | struct ftrace_profile *rec = v; |
457 | struct ftrace_profile_page *pg; |
458 | |
459 | pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK); |
460 | |
461 | again: |
462 | if (idx != 0) |
463 | rec++; |
464 | |
465 | if ((void *)rec >= (void *)&pg->records[pg->index]) { |
466 | pg = pg->next; |
467 | if (!pg) |
468 | return NULL; |
469 | rec = &pg->records[0]; |
470 | if (!rec->counter) |
471 | goto again; |
472 | } |
473 | |
474 | return rec; |
475 | } |
476 | |
477 | static void *function_stat_start(struct tracer_stat *trace) |
478 | { |
479 | struct ftrace_profile_stat *stat = |
480 | container_of(trace, struct ftrace_profile_stat, stat); |
481 | |
482 | if (!stat || !stat->start) |
483 | return NULL; |
484 | |
485 | return function_stat_next(&stat->start->records[0], 0); |
486 | } |
487 | |
488 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
489 | /* function graph compares on total time */ |
490 | static int function_stat_cmp(const void *p1, const void *p2) |
491 | { |
492 | const struct ftrace_profile *a = p1; |
493 | const struct ftrace_profile *b = p2; |
494 | |
495 | if (a->time < b->time) |
496 | return -1; |
497 | if (a->time > b->time) |
498 | return 1; |
499 | else |
500 | return 0; |
501 | } |
502 | #else |
503 | /* not function graph compares against hits */ |
504 | static int function_stat_cmp(const void *p1, const void *p2) |
505 | { |
506 | const struct ftrace_profile *a = p1; |
507 | const struct ftrace_profile *b = p2; |
508 | |
509 | if (a->counter < b->counter) |
510 | return -1; |
511 | if (a->counter > b->counter) |
512 | return 1; |
513 | else |
514 | return 0; |
515 | } |
516 | #endif |
517 | |
518 | static int function_stat_headers(struct seq_file *m) |
519 | { |
520 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
521 | seq_puts(m, " Function " |
522 | "Hit Time Avg s^2\n" |
523 | " -------- " |
524 | "--- ---- --- ---\n"); |
525 | #else |
526 | seq_puts(m, " Function Hit\n" |
527 | " -------- ---\n"); |
528 | #endif |
529 | return 0; |
530 | } |
531 | |
532 | static int function_stat_show(struct seq_file *m, void *v) |
533 | { |
534 | struct ftrace_profile *rec = v; |
535 | char str[KSYM_SYMBOL_LEN]; |
536 | int ret = 0; |
537 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
538 | static struct trace_seq s; |
539 | unsigned long long avg; |
540 | unsigned long long stddev; |
541 | #endif |
542 | mutex_lock(&ftrace_profile_lock); |
543 | |
544 | /* we raced with function_profile_reset() */ |
545 | if (unlikely(rec->counter == 0)) { |
546 | ret = -EBUSY; |
547 | goto out; |
548 | } |
549 | |
550 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
551 | avg = div64_ul(rec->time, rec->counter); |
552 | if (tracing_thresh && (avg < tracing_thresh)) |
553 | goto out; |
554 | #endif |
555 | |
556 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); |
557 | seq_printf(m, " %-30.30s %10lu", str, rec->counter); |
558 | |
559 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
560 | seq_puts(m, " "); |
561 | |
562 | /* Sample standard deviation (s^2) */ |
563 | if (rec->counter <= 1) |
564 | stddev = 0; |
565 | else { |
566 | /* |
567 | * Apply Welford's method: |
568 | * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2) |
569 | */ |
570 | stddev = rec->counter * rec->time_squared - |
571 | rec->time * rec->time; |
572 | |
573 | /* |
574 | * Divide by only 1000 for ns^2 -> us^2 conversion. |
575 | * trace_print_graph_duration will divide by 1000 again. |
576 | */ |
577 | stddev = div64_ul(stddev, |
578 | rec->counter * (rec->counter - 1) * 1000); |
579 | } |
580 | |
581 | trace_seq_init(&s); |
582 | trace_print_graph_duration(rec->time, &s); |
583 | trace_seq_puts(&s, " "); |
584 | trace_print_graph_duration(avg, &s); |
585 | trace_seq_puts(&s, " "); |
586 | trace_print_graph_duration(stddev, &s); |
587 | trace_print_seq(m, &s); |
588 | #endif |
589 | seq_putc(m, '\n'); |
590 | out: |
591 | mutex_unlock(&ftrace_profile_lock); |
592 | |
593 | return ret; |
594 | } |
595 | |
596 | static void ftrace_profile_reset(struct ftrace_profile_stat *stat) |
597 | { |
598 | struct ftrace_profile_page *pg; |
599 | |
600 | pg = stat->pages = stat->start; |
601 | |
602 | while (pg) { |
603 | memset(pg->records, 0, PROFILE_RECORDS_SIZE); |
604 | pg->index = 0; |
605 | pg = pg->next; |
606 | } |
607 | |
608 | memset(stat->hash, 0, |
609 | FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head)); |
610 | } |
611 | |
612 | static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) |
613 | { |
614 | struct ftrace_profile_page *pg; |
615 | int functions; |
616 | int pages; |
617 | int i; |
618 | |
619 | /* If we already allocated, do nothing */ |
620 | if (stat->pages) |
621 | return 0; |
622 | |
623 | stat->pages = (void *)get_zeroed_page(GFP_KERNEL); |
624 | if (!stat->pages) |
625 | return -ENOMEM; |
626 | |
627 | #ifdef CONFIG_DYNAMIC_FTRACE |
628 | functions = ftrace_update_tot_cnt; |
629 | #else |
630 | /* |
631 | * We do not know the number of functions that exist because |
632 | * dynamic tracing is what counts them. With past experience |
633 | * we have around 20K functions. That should be more than enough. |
634 | * It is highly unlikely we will execute every function in |
635 | * the kernel. |
636 | */ |
637 | functions = 20000; |
638 | #endif |
639 | |
640 | pg = stat->start = stat->pages; |
641 | |
642 | pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE); |
643 | |
644 | for (i = 1; i < pages; i++) { |
645 | pg->next = (void *)get_zeroed_page(GFP_KERNEL); |
646 | if (!pg->next) |
647 | goto out_free; |
648 | pg = pg->next; |
649 | } |
650 | |
651 | return 0; |
652 | |
653 | out_free: |
654 | pg = stat->start; |
655 | while (pg) { |
656 | unsigned long tmp = (unsigned long)pg; |
657 | |
658 | pg = pg->next; |
659 | free_page(tmp); |
660 | } |
661 | |
662 | stat->pages = NULL; |
663 | stat->start = NULL; |
664 | |
665 | return -ENOMEM; |
666 | } |
667 | |
668 | static int ftrace_profile_init_cpu(int cpu) |
669 | { |
670 | struct ftrace_profile_stat *stat; |
671 | int size; |
672 | |
673 | stat = &per_cpu(ftrace_profile_stats, cpu); |
674 | |
675 | if (stat->hash) { |
676 | /* If the profile is already created, simply reset it */ |
677 | ftrace_profile_reset(stat); |
678 | return 0; |
679 | } |
680 | |
681 | /* |
682 | * We are profiling all functions, but usually only a few thousand |
683 | * functions are hit. We'll make a hash of 1024 items. |
684 | */ |
685 | size = FTRACE_PROFILE_HASH_SIZE; |
686 | |
687 | stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL); |
688 | |
689 | if (!stat->hash) |
690 | return -ENOMEM; |
691 | |
692 | /* Preallocate the function profiling pages */ |
693 | if (ftrace_profile_pages_init(stat) < 0) { |
694 | kfree(stat->hash); |
695 | stat->hash = NULL; |
696 | return -ENOMEM; |
697 | } |
698 | |
699 | return 0; |
700 | } |
701 | |
702 | static int ftrace_profile_init(void) |
703 | { |
704 | int cpu; |
705 | int ret = 0; |
706 | |
707 | for_each_possible_cpu(cpu) { |
708 | ret = ftrace_profile_init_cpu(cpu); |
709 | if (ret) |
710 | break; |
711 | } |
712 | |
713 | return ret; |
714 | } |
715 | |
716 | /* interrupts must be disabled */ |
717 | static struct ftrace_profile * |
718 | ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip) |
719 | { |
720 | struct ftrace_profile *rec; |
721 | struct hlist_head *hhd; |
722 | unsigned long key; |
723 | |
724 | key = hash_long(ip, FTRACE_PROFILE_HASH_BITS); |
725 | hhd = &stat->hash[key]; |
726 | |
727 | if (hlist_empty(hhd)) |
728 | return NULL; |
729 | |
730 | hlist_for_each_entry_rcu_notrace(rec, hhd, node) { |
731 | if (rec->ip == ip) |
732 | return rec; |
733 | } |
734 | |
735 | return NULL; |
736 | } |
737 | |
738 | static void ftrace_add_profile(struct ftrace_profile_stat *stat, |
739 | struct ftrace_profile *rec) |
740 | { |
741 | unsigned long key; |
742 | |
743 | key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS); |
744 | hlist_add_head_rcu(&rec->node, &stat->hash[key]); |
745 | } |
746 | |
747 | /* |
748 | * The memory is already allocated; this simply finds a new record to use. |
749 | */ |
750 | static struct ftrace_profile * |
751 | ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip) |
752 | { |
753 | struct ftrace_profile *rec = NULL; |
754 | |
755 | /* prevent recursion (from NMIs) */ |
756 | if (atomic_inc_return(&stat->disabled) != 1) |
757 | goto out; |
758 | |
759 | /* |
760 | * Try to find the function again since an NMI |
761 | * could have added it |
762 | */ |
763 | rec = ftrace_find_profiled_func(stat, ip); |
764 | if (rec) |
765 | goto out; |
766 | |
767 | if (stat->pages->index == PROFILES_PER_PAGE) { |
768 | if (!stat->pages->next) |
769 | goto out; |
770 | stat->pages = stat->pages->next; |
771 | } |
772 | |
773 | rec = &stat->pages->records[stat->pages->index++]; |
774 | rec->ip = ip; |
775 | ftrace_add_profile(stat, rec); |
776 | |
777 | out: |
778 | atomic_dec(&stat->disabled); |
779 | |
780 | return rec; |
781 | } |
782 | |
783 | static void |
784 | function_profile_call(unsigned long ip, unsigned long parent_ip, |
785 | struct ftrace_ops *ops, struct ftrace_regs *fregs) |
786 | { |
787 | struct ftrace_profile_stat *stat; |
788 | struct ftrace_profile *rec; |
789 | unsigned long flags; |
790 | |
791 | if (!ftrace_profile_enabled) |
792 | return; |
793 | |
794 | local_irq_save(flags); |
795 | |
796 | stat = this_cpu_ptr(&ftrace_profile_stats); |
797 | if (!stat->hash || !ftrace_profile_enabled) |
798 | goto out; |
799 | |
800 | rec = ftrace_find_profiled_func(stat, ip); |
801 | if (!rec) { |
802 | rec = ftrace_profile_alloc(stat, ip); |
803 | if (!rec) |
804 | goto out; |
805 | } |
806 | |
807 | rec->counter++; |
808 | out: |
809 | local_irq_restore(flags); |
810 | } |
811 | |
812 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
813 | static bool fgraph_graph_time = true; |
814 | |
815 | void ftrace_graph_graph_time_control(bool enable) |
816 | { |
817 | fgraph_graph_time = enable; |
818 | } |
819 | |
820 | static int profile_graph_entry(struct ftrace_graph_ent *trace) |
821 | { |
822 | struct ftrace_ret_stack *ret_stack; |
823 | |
824 | function_profile_call(trace->func, 0, NULL, NULL); |
825 | |
826 | /* If function graph is shutting down, ret_stack can be NULL */ |
827 | if (!current->ret_stack) |
828 | return 0; |
829 | |
830 | ret_stack = ftrace_graph_get_ret_stack(current, 0); |
831 | if (ret_stack) |
832 | ret_stack->subtime = 0; |
833 | |
834 | return 1; |
835 | } |
836 | |
837 | static void profile_graph_return(struct ftrace_graph_ret *trace) |
838 | { |
839 | struct ftrace_ret_stack *ret_stack; |
840 | struct ftrace_profile_stat *stat; |
841 | unsigned long long calltime; |
842 | struct ftrace_profile *rec; |
843 | unsigned long flags; |
844 | |
845 | local_irq_save(flags); |
846 | stat = this_cpu_ptr(&ftrace_profile_stats); |
847 | if (!stat->hash || !ftrace_profile_enabled) |
848 | goto out; |
849 | |
850 | /* If the calltime was zero'd ignore it */ |
851 | if (!trace->calltime) |
852 | goto out; |
853 | |
854 | calltime = trace->rettime - trace->calltime; |
855 | |
856 | if (!fgraph_graph_time) { |
857 | |
858 | /* Append this call time to the parent time to subtract */ |
859 | ret_stack = ftrace_graph_get_ret_stack(current, 1); |
860 | if (ret_stack) |
861 | ret_stack->subtime += calltime; |
862 | |
863 | ret_stack = ftrace_graph_get_ret_stack(current, 0); |
864 | if (ret_stack && ret_stack->subtime < calltime) |
865 | calltime -= ret_stack->subtime; |
866 | else |
867 | calltime = 0; |
868 | } |
869 | |
870 | rec = ftrace_find_profiled_func(stat, trace->func); |
871 | if (rec) { |
872 | rec->time += calltime; |
873 | rec->time_squared += calltime * calltime; |
874 | } |
875 | |
876 | out: |
877 | local_irq_restore(flags); |
878 | } |
879 | |
880 | static struct fgraph_ops fprofiler_ops = { |
881 | .entryfunc = &profile_graph_entry, |
882 | .retfunc = &profile_graph_return, |
883 | }; |
884 | |
885 | static int register_ftrace_profiler(void) |
886 | { |
887 | return register_ftrace_graph(&fprofiler_ops); |
888 | } |
889 | |
890 | static void unregister_ftrace_profiler(void) |
891 | { |
892 | unregister_ftrace_graph(&fprofiler_ops); |
893 | } |
894 | #else |
895 | static struct ftrace_ops ftrace_profile_ops __read_mostly = { |
896 | .func = function_profile_call, |
897 | .flags = FTRACE_OPS_FL_INITIALIZED, |
898 | INIT_OPS_HASH(ftrace_profile_ops) |
899 | }; |
900 | |
901 | static int register_ftrace_profiler(void) |
902 | { |
903 | return register_ftrace_function(&ftrace_profile_ops); |
904 | } |
905 | |
906 | static void unregister_ftrace_profiler(void) |
907 | { |
908 | unregister_ftrace_function(&ftrace_profile_ops); |
909 | } |
910 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
911 | |
912 | static ssize_t |
913 | ftrace_profile_write(struct file *filp, const char __user *ubuf, |
914 | size_t cnt, loff_t *ppos) |
915 | { |
916 | unsigned long val; |
917 | int ret; |
918 | |
919 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
920 | if (ret) |
921 | return ret; |
922 | |
923 | val = !!val; |
924 | |
925 | mutex_lock(&ftrace_profile_lock); |
926 | if (ftrace_profile_enabled ^ val) { |
927 | if (val) { |
928 | ret = ftrace_profile_init(); |
929 | if (ret < 0) { |
930 | cnt = ret; |
931 | goto out; |
932 | } |
933 | |
934 | ret = register_ftrace_profiler(); |
935 | if (ret < 0) { |
936 | cnt = ret; |
937 | goto out; |
938 | } |
939 | ftrace_profile_enabled = 1; |
940 | } else { |
941 | ftrace_profile_enabled = 0; |
942 | /* |
943 | * unregister_ftrace_profiler calls stop_machine |
944 | * so this acts like a synchronize_rcu(). |
945 | */ |
946 | unregister_ftrace_profiler(); |
947 | } |
948 | } |
949 | out: |
950 | mutex_unlock(&ftrace_profile_lock); |
951 | |
952 | *ppos += cnt; |
953 | |
954 | return cnt; |
955 | } |
956 | |
957 | static ssize_t |
958 | ftrace_profile_read(struct file *filp, char __user *ubuf, |
959 | size_t cnt, loff_t *ppos) |
960 | { |
961 | char buf[64]; /* big enough to hold a number */ |
962 | int r; |
963 | |
964 | r = sprintf(buf, "%u\n", ftrace_profile_enabled); |
965 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
966 | } |
967 | |
968 | static const struct file_operations ftrace_profile_fops = { |
969 | .open = tracing_open_generic, |
970 | .read = ftrace_profile_read, |
971 | .write = ftrace_profile_write, |
972 | .llseek = default_llseek, |
973 | }; |
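/*
 * Typical usage from user space (a sketch; assumes tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *	# echo 1 > /sys/kernel/tracing/function_profile_enabled
 *	# ... run the workload ...
 *	# cat /sys/kernel/tracing/trace_stat/function0   # per-CPU stats, CPU 0
 *	# echo 0 > /sys/kernel/tracing/function_profile_enabled
 */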
974 | |
975 | /* used to initialize the real stat files */ |
976 | static struct tracer_stat function_stats __initdata = { |
977 | .name = "functions", |
978 | .stat_start = function_stat_start, |
979 | .stat_next = function_stat_next, |
980 | .stat_cmp = function_stat_cmp, |
981 | .stat_headers = function_stat_headers, |
982 | .stat_show = function_stat_show |
983 | }; |
984 | |
985 | static __init void ftrace_profile_tracefs(struct dentry *d_tracer) |
986 | { |
987 | struct ftrace_profile_stat *stat; |
988 | char *name; |
989 | int ret; |
990 | int cpu; |
991 | |
992 | for_each_possible_cpu(cpu) { |
993 | stat = &per_cpu(ftrace_profile_stats, cpu); |
994 | |
995 | name = kasprintf(GFP_KERNEL, "function%d", cpu); |
996 | if (!name) { |
997 | /* |
998 | * The files created are permanent, if something happens |
999 | * we still do not free memory. |
1000 | */ |
1001 | WARN(1, |
1002 | "Could not allocate stat file for cpu %d\n", |
1003 | cpu); |
1004 | return; |
1005 | } |
1006 | stat->stat = function_stats; |
1007 | stat->stat.name = name; |
1008 | ret = register_stat_tracer(&stat->stat); |
1009 | if (ret) { |
1010 | WARN(1, |
1011 | "Could not register function stat for cpu %d\n", |
1012 | cpu); |
1013 | kfree(name); |
1014 | return; |
1015 | } |
1016 | } |
1017 | |
1018 | trace_create_file("function_profile_enabled", |
1019 | TRACE_MODE_WRITE, d_tracer, NULL, |
1020 | &ftrace_profile_fops); |
1021 | } |
1022 | |
1023 | #else /* CONFIG_FUNCTION_PROFILER */ |
1024 | static __init void ftrace_profile_tracefs(struct dentry *d_tracer) |
1025 | { |
1026 | } |
1027 | #endif /* CONFIG_FUNCTION_PROFILER */ |
1028 | |
1029 | #ifdef CONFIG_DYNAMIC_FTRACE |
1030 | |
1031 | static struct ftrace_ops *removed_ops; |
1032 | |
1033 | /* |
1034 | * Set when doing a global update, like enabling all recs or disabling them. |
1035 | * It is not set when just updating a single ftrace_ops. |
1036 | */ |
1037 | static bool update_all_ops; |
1038 | |
1039 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD |
1040 | # error Dynamic ftrace depends on MCOUNT_RECORD |
1041 | #endif |
1042 | |
1043 | struct ftrace_func_probe { |
1044 | struct ftrace_probe_ops *probe_ops; |
1045 | struct ftrace_ops ops; |
1046 | struct trace_array *tr; |
1047 | struct list_head list; |
1048 | void *data; |
1049 | int ref; |
1050 | }; |
1051 | |
1052 | /* |
1053 | * We make these constant because no one should touch them, |
1054 | * but they are used as the default "empty hash", to avoid allocating |
1055 | * it all the time. These are in a read only section such that if |
1056 | * anyone does try to modify it, it will cause an exception. |
1057 | */ |
1058 | static const struct hlist_head empty_buckets[1]; |
1059 | static const struct ftrace_hash empty_hash = { |
1060 | .buckets = (struct hlist_head *)empty_buckets, |
1061 | }; |
1062 | #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) |
1063 | |
1064 | struct ftrace_ops global_ops = { |
1065 | .func = ftrace_stub, |
1066 | .local_hash.notrace_hash = EMPTY_HASH, |
1067 | .local_hash.filter_hash = EMPTY_HASH, |
1068 | INIT_OPS_HASH(global_ops) |
1069 | .flags = FTRACE_OPS_FL_INITIALIZED | |
1070 | FTRACE_OPS_FL_PID, |
1071 | }; |
1072 | |
1073 | /* |
1074 | * Used by the stack unwinder to know about dynamic ftrace trampolines. |
1075 | */ |
1076 | struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr) |
1077 | { |
1078 | struct ftrace_ops *op = NULL; |
1079 | |
1080 | /* |
1081 | * Some of the ops may be dynamically allocated, |
1082 | * they are freed after a synchronize_rcu(). |
1083 | */ |
1084 | preempt_disable_notrace(); |
1085 | |
1086 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
1087 | /* |
1088 | * This is to check for dynamically allocated trampolines. |
1089 | * Trampolines that are in kernel text will have |
1090 | * core_kernel_text() return true. |
1091 | */ |
1092 | if (op->trampoline && op->trampoline_size) |
1093 | if (addr >= op->trampoline && |
1094 | addr < op->trampoline + op->trampoline_size) { |
1095 | preempt_enable_notrace(); |
1096 | return op; |
1097 | } |
1098 | } while_for_each_ftrace_op(op); |
1099 | preempt_enable_notrace(); |
1100 | |
1101 | return NULL; |
1102 | } |
1103 | |
1104 | /* |
1105 | * This is used by __kernel_text_address() to return true if the |
1106 | * address is on a dynamically allocated trampoline that would |
1107 | * not return true for either core_kernel_text() or |
1108 | * is_module_text_address(). |
1109 | */ |
1110 | bool is_ftrace_trampoline(unsigned long addr) |
1111 | { |
1112 | return ftrace_ops_trampoline(addr) != NULL; |
1113 | } |
1114 | |
1115 | struct ftrace_page { |
1116 | struct ftrace_page *next; |
1117 | struct dyn_ftrace *records; |
1118 | int index; |
1119 | int order; |
1120 | }; |
1121 | |
1122 | #define ENTRY_SIZE sizeof(struct dyn_ftrace) |
1123 | #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE) |
1124 | |
1125 | static struct ftrace_page *ftrace_pages_start; |
1126 | static struct ftrace_page *ftrace_pages; |
1127 | |
1128 | static __always_inline unsigned long |
1129 | ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip) |
1130 | { |
1131 | if (hash->size_bits > 0) |
1132 | return hash_long(ip, hash->size_bits); |
1133 | |
1134 | return 0; |
1135 | } |
1136 | |
1137 | /* Only use this function if ftrace_hash_empty() has already been tested */ |
1138 | static __always_inline struct ftrace_func_entry * |
1139 | __ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) |
1140 | { |
1141 | unsigned long key; |
1142 | struct ftrace_func_entry *entry; |
1143 | struct hlist_head *hhd; |
1144 | |
1145 | key = ftrace_hash_key(hash, ip); |
1146 | hhd = &hash->buckets[key]; |
1147 | |
1148 | hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) { |
1149 | if (entry->ip == ip) |
1150 | return entry; |
1151 | } |
1152 | return NULL; |
1153 | } |
1154 | |
1155 | /** |
1156 | * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash |
1157 | * @hash: The hash to look at |
1158 | * @ip: The instruction pointer to test |
1159 | * |
1160 | * Search a given @hash to see if a given instruction pointer (@ip) |
1161 | * exists in it. |
1162 | * |
1163 | * Returns: the entry that holds the @ip if found. NULL otherwise. |
1164 | */ |
1165 | struct ftrace_func_entry * |
1166 | ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) |
1167 | { |
1168 | if (ftrace_hash_empty(hash)) |
1169 | return NULL; |
1170 | |
1171 | return __ftrace_lookup_ip(hash, ip); |
1172 | } |
1173 | |
1174 | static void __add_hash_entry(struct ftrace_hash *hash, |
1175 | struct ftrace_func_entry *entry) |
1176 | { |
1177 | struct hlist_head *hhd; |
1178 | unsigned long key; |
1179 | |
1180 | key = ftrace_hash_key(hash, entry->ip); |
1181 | hhd = &hash->buckets[key]; |
1182 | hlist_add_head(&entry->hlist, hhd); |
1183 | hash->count++; |
1184 | } |
1185 | |
1186 | static struct ftrace_func_entry * |
1187 | add_hash_entry(struct ftrace_hash *hash, unsigned long ip) |
1188 | { |
1189 | struct ftrace_func_entry *entry; |
1190 | |
1191 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); |
1192 | if (!entry) |
1193 | return NULL; |
1194 | |
1195 | entry->ip = ip; |
1196 | __add_hash_entry(hash, entry); |
1197 | |
1198 | return entry; |
1199 | } |
1200 | |
1201 | static void |
1202 | free_hash_entry(struct ftrace_hash *hash, |
1203 | struct ftrace_func_entry *entry) |
1204 | { |
1205 | hlist_del(&entry->hlist); |
1206 | kfree(entry); |
1207 | hash->count--; |
1208 | } |
1209 | |
1210 | static void |
1211 | remove_hash_entry(struct ftrace_hash *hash, |
1212 | struct ftrace_func_entry *entry) |
1213 | { |
1214 | hlist_del_rcu(&entry->hlist); |
1215 | hash->count--; |
1216 | } |
1217 | |
1218 | static void ftrace_hash_clear(struct ftrace_hash *hash) |
1219 | { |
1220 | struct hlist_head *hhd; |
1221 | struct hlist_node *tn; |
1222 | struct ftrace_func_entry *entry; |
1223 | int size = 1 << hash->size_bits; |
1224 | int i; |
1225 | |
1226 | if (!hash->count) |
1227 | return; |
1228 | |
1229 | for (i = 0; i < size; i++) { |
1230 | hhd = &hash->buckets[i]; |
1231 | hlist_for_each_entry_safe(entry, tn, hhd, hlist) |
1232 | free_hash_entry(hash, entry); |
1233 | } |
1234 | FTRACE_WARN_ON(hash->count); |
1235 | } |
1236 | |
1237 | static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod) |
1238 | { |
1239 | list_del(&ftrace_mod->list); |
1240 | kfree(ftrace_mod->module); |
1241 | kfree(ftrace_mod->func); |
1242 | kfree(ftrace_mod); |
1243 | } |
1244 | |
1245 | static void clear_ftrace_mod_list(struct list_head *head) |
1246 | { |
1247 | struct ftrace_mod_load *p, *n; |
1248 | |
1249 | /* stack tracer isn't supported yet */ |
1250 | if (!head) |
1251 | return; |
1252 | |
1253 | mutex_lock(&ftrace_lock); |
1254 | list_for_each_entry_safe(p, n, head, list) |
1255 | free_ftrace_mod(p); |
1256 | mutex_unlock(&ftrace_lock); |
1257 | } |
1258 | |
1259 | static void free_ftrace_hash(struct ftrace_hash *hash) |
1260 | { |
1261 | if (!hash || hash == EMPTY_HASH) |
1262 | return; |
1263 | ftrace_hash_clear(hash); |
1264 | kfree(hash->buckets); |
1265 | kfree(hash); |
1266 | } |
1267 | |
1268 | static void __free_ftrace_hash_rcu(struct rcu_head *rcu) |
1269 | { |
1270 | struct ftrace_hash *hash; |
1271 | |
1272 | hash = container_of(rcu, struct ftrace_hash, rcu); |
1273 | free_ftrace_hash(hash); |
1274 | } |
1275 | |
1276 | static void free_ftrace_hash_rcu(struct ftrace_hash *hash) |
1277 | { |
1278 | if (!hash || hash == EMPTY_HASH) |
1279 | return; |
1280 | call_rcu(&hash->rcu, __free_ftrace_hash_rcu); |
1281 | } |
1282 | |
1283 | /** |
1284 | * ftrace_free_filter - remove all filters for an ftrace_ops |
1285 | * @ops: the ops to remove the filters from |
1286 | */ |
1287 | void ftrace_free_filter(struct ftrace_ops *ops) |
1288 | { |
1289 | ftrace_ops_init(ops); |
1290 | free_ftrace_hash(ops->func_hash->filter_hash); |
1291 | free_ftrace_hash(ops->func_hash->notrace_hash); |
1292 | } |
1293 | EXPORT_SYMBOL_GPL(ftrace_free_filter); |
1294 | |
1295 | static struct ftrace_hash *alloc_ftrace_hash(int size_bits) |
1296 | { |
1297 | struct ftrace_hash *hash; |
1298 | int size; |
1299 | |
1300 | hash = kzalloc(sizeof(*hash), GFP_KERNEL); |
1301 | if (!hash) |
1302 | return NULL; |
1303 | |
1304 | size = 1 << size_bits; |
1305 | hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL); |
1306 | |
1307 | if (!hash->buckets) { |
1308 | kfree(hash); |
1309 | return NULL; |
1310 | } |
1311 | |
1312 | hash->size_bits = size_bits; |
1313 | |
1314 | return hash; |
1315 | } |
1316 | |
1317 | |
1318 | static int ftrace_add_mod(struct trace_array *tr, |
1319 | const char *func, const char *module, |
1320 | int enable) |
1321 | { |
1322 | struct ftrace_mod_load *ftrace_mod; |
1323 | struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace; |
1324 | |
1325 | ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL); |
1326 | if (!ftrace_mod) |
1327 | return -ENOMEM; |
1328 | |
1329 | INIT_LIST_HEAD(&ftrace_mod->list); |
1330 | ftrace_mod->func = kstrdup(func, GFP_KERNEL); |
1331 | ftrace_mod->module = kstrdup(module, GFP_KERNEL); |
1332 | ftrace_mod->enable = enable; |
1333 | |
1334 | if (!ftrace_mod->func || !ftrace_mod->module) |
1335 | goto out_free; |
1336 | |
1337 | list_add(&ftrace_mod->list, mod_head); |
1338 | |
1339 | return 0; |
1340 | |
1341 | out_free: |
1342 | free_ftrace_mod(ftrace_mod); |
1343 | |
1344 | return -ENOMEM; |
1345 | } |
1346 | |
1347 | static struct ftrace_hash * |
1348 | alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) |
1349 | { |
1350 | struct ftrace_func_entry *entry; |
1351 | struct ftrace_hash *new_hash; |
1352 | int size; |
1353 | int i; |
1354 | |
1355 | new_hash = alloc_ftrace_hash(size_bits); |
1356 | if (!new_hash) |
1357 | return NULL; |
1358 | |
1359 | if (hash) |
1360 | new_hash->flags = hash->flags; |
1361 | |
1362 | /* Empty hash? */ |
1363 | if (ftrace_hash_empty(hash)) |
1364 | return new_hash; |
1365 | |
1366 | size = 1 << hash->size_bits; |
1367 | for (i = 0; i < size; i++) { |
1368 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { |
1369 | if (add_hash_entry(new_hash, entry->ip) == NULL) |
1370 | goto free_hash; |
1371 | } |
1372 | } |
1373 | |
1374 | FTRACE_WARN_ON(new_hash->count != hash->count); |
1375 | |
1376 | return new_hash; |
1377 | |
1378 | free_hash: |
1379 | free_ftrace_hash(new_hash); |
1380 | return NULL; |
1381 | } |
1382 | |
1383 | static void |
1384 | ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash); |
1385 | static void |
1386 | ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash); |
1387 | |
1388 | static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, |
1389 | struct ftrace_hash *new_hash); |
1390 | |
1391 | static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size) |
1392 | { |
1393 | struct ftrace_func_entry *entry; |
1394 | struct ftrace_hash *new_hash; |
1395 | struct hlist_head *hhd; |
1396 | struct hlist_node *tn; |
1397 | int bits = 0; |
1398 | int i; |
1399 | |
1400 | /* |
1401 | * Use around half the size (max bit of it), but |
1402 | * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits). |
1403 | */ |
1404 | bits = fls(size / 2); |
1405 | |
1406 | /* Don't allocate too much */ |
1407 | if (bits > FTRACE_HASH_MAX_BITS) |
1408 | bits = FTRACE_HASH_MAX_BITS; |
1409 | |
1410 | new_hash = alloc_ftrace_hash(bits); |
1411 | if (!new_hash) |
1412 | return NULL; |
1413 | |
1414 | new_hash->flags = src->flags; |
1415 | |
1416 | size = 1 << src->size_bits; |
1417 | for (i = 0; i < size; i++) { |
1418 | hhd = &src->buckets[i]; |
1419 | hlist_for_each_entry_safe(entry, tn, hhd, hlist) { |
1420 | remove_hash_entry(src, entry); |
1421 | __add_hash_entry(new_hash, entry); |
1422 | } |
1423 | } |
1424 | return new_hash; |
1425 | } |
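/*
 * Sizing example for dup_hash() above (illustrative): if src->count is 300,
 * fls(300 / 2) = fls(150) = 8, so the new hash gets 2^8 = 256 buckets --
 * roughly half the entry count rounded up to a power of two, capped at
 * FTRACE_HASH_MAX_BITS (4096 buckets).
 */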
1426 | |
1427 | static struct ftrace_hash * |
1428 | __ftrace_hash_move(struct ftrace_hash *src) |
1429 | { |
1430 | int size = src->count; |
1431 | |
1432 | /* |
1433 | * If the new source is empty, just return the empty_hash. |
1434 | */ |
1435 | if (ftrace_hash_empty(src)) |
1436 | return EMPTY_HASH; |
1437 | |
1438 | return dup_hash(src, size); |
1439 | } |
1440 | |
1441 | static int |
1442 | ftrace_hash_move(struct ftrace_ops *ops, int enable, |
1443 | struct ftrace_hash **dst, struct ftrace_hash *src) |
1444 | { |
1445 | struct ftrace_hash *new_hash; |
1446 | int ret; |
1447 | |
1448 | /* Reject setting notrace hash on IPMODIFY ftrace_ops */ |
1449 | if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable) |
1450 | return -EINVAL; |
1451 | |
1452 | new_hash = __ftrace_hash_move(src); |
1453 | if (!new_hash) |
1454 | return -ENOMEM; |
1455 | |
1456 | /* Make sure this can be applied if it is IPMODIFY ftrace_ops */ |
1457 | if (enable) { |
1458 | /* IPMODIFY should be updated only when filter_hash updating */ |
1459 | ret = ftrace_hash_ipmodify_update(ops, new_hash); |
1460 | if (ret < 0) { |
1461 | free_ftrace_hash(new_hash); |
1462 | return ret; |
1463 | } |
1464 | } |
1465 | |
1466 | /* |
1467 | * Remove the current set, update the hash and add |
1468 | * them back. |
1469 | */ |
1470 | ftrace_hash_rec_disable_modify(ops, enable); |
1471 | |
1472 | rcu_assign_pointer(*dst, new_hash); |
1473 | |
1474 | ftrace_hash_rec_enable_modify(ops, enable); |
1475 | |
1476 | return 0; |
1477 | } |
1478 | |
1479 | static bool hash_contains_ip(unsigned long ip, |
1480 | struct ftrace_ops_hash *hash) |
1481 | { |
1482 | /* |
1483 | * The function record is a match if it exists in the filter |
1484 | * hash and not in the notrace hash. Note, an empty hash is |
1485 | * considered a match for the filter hash, but an empty |
1486 | * notrace hash is considered not in the notrace hash. |
1487 | */ |
1488 | return (ftrace_hash_empty(hash->filter_hash) || |
1489 | __ftrace_lookup_ip(hash->filter_hash, ip)) && |
1490 | (ftrace_hash_empty(hash->notrace_hash) || |
1491 | !__ftrace_lookup_ip(hash->notrace_hash, ip)); |
1492 | } |
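/*
 * Concrete example of the matching rules above (illustrative): with
 * filter_hash = { kfree } and an empty notrace_hash, only kfree matches;
 * with an empty filter_hash and notrace_hash = { kfree }, every traced
 * function except kfree matches; with both hashes empty, everything matches.
 */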
1493 | |
1494 | /* |
1495 | * Test the hashes for this ops to see if we want to call |
1496 | * the ops->func or not. |
1497 | * |
1498 | * It's a match if the ip is in the ops->filter_hash or |
1499 | * the filter_hash does not exist or is empty, |
1500 | * AND |
1501 | * the ip is not in the ops->notrace_hash. |
1502 | * |
1503 | * This needs to be called with preemption disabled as |
1504 | * the hashes are freed with call_rcu(). |
1505 | */ |
1506 | int |
1507 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) |
1508 | { |
1509 | struct ftrace_ops_hash hash; |
1510 | int ret; |
1511 | |
1512 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS |
1513 | /* |
1514 | * There's a small race when adding ops that the ftrace handler |
1515 | * that wants regs, may be called without them. We can not |
1516 | * allow that handler to be called if regs is NULL. |
1517 | */ |
1518 | if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS)) |
1519 | return 0; |
1520 | #endif |
1521 | |
1522 | rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash); |
1523 | rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash); |
1524 | |
1525 | if (hash_contains_ip(ip, &hash)) |
1526 | ret = 1; |
1527 | else |
1528 | ret = 0; |
1529 | |
1530 | return ret; |
1531 | } |
1532 | |
1533 | /* |
1534 | * This is a double for loop. Do not use 'break' to break out of the loop, |
1535 | * you must use a goto. |
1536 | */ |
1537 | #define do_for_each_ftrace_rec(pg, rec) \ |
1538 | for (pg = ftrace_pages_start; pg; pg = pg->next) { \ |
1539 | int _____i; \ |
1540 | for (_____i = 0; _____i < pg->index; _____i++) { \ |
1541 | rec = &pg->records[_____i]; |
1542 | |
1543 | #define while_for_each_ftrace_rec() \ |
1544 | } \ |
1545 | } |
1546 | |
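/*
 * Usage sketch for the iteration macros above (assumes ftrace_lock is held
 * by the caller, as users of these macros generally are; target_ip is a
 * made-up variable for illustration):
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto found;	// 'break' would only leave the inner loop
 *	} while_for_each_ftrace_rec();
 */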
1547 | |
1548 | static int ftrace_cmp_recs(const void *a, const void *b) |
1549 | { |
1550 | const struct dyn_ftrace *key = a; |
1551 | const struct dyn_ftrace *rec = b; |
1552 | |
1553 | if (key->flags < rec->ip) |
1554 | return -1; |
1555 | if (key->ip >= rec->ip + MCOUNT_INSN_SIZE) |
1556 | return 1; |
1557 | return 0; |
1558 | } |
1559 | |
1560 | static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end) |
1561 | { |
1562 | struct ftrace_page *pg; |
1563 | struct dyn_ftrace *rec = NULL; |
1564 | struct dyn_ftrace key; |
1565 | |
1566 | key.ip = start; |
1567 | key.flags = end; /* overload flags, as it is unsigned long */ |
1568 | |
1569 | for (pg = ftrace_pages_start; pg; pg = pg->next) { |
1570 | if (pg->index == 0 || |
1571 | end < pg->records[0].ip || |
1572 | start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) |
1573 | continue; |
1574 | rec = bsearch(&key, pg->records, pg->index, |
1575 | sizeof(struct dyn_ftrace), |
1576 | ftrace_cmp_recs); |
1577 | if (rec) |
1578 | break; |
1579 | } |
1580 | return rec; |
1581 | } |
1582 | |
1583 | /** |
1584 | * ftrace_location_range - return the first address of a traced location |
1585 | * if it touches the given ip range |
1586 | * @start: start of range to search. |
1587 | * @end: end of range to search (inclusive). @end points to the last byte |
1588 | * to check. |
1589 | * |
1590 | * Returns: rec->ip if the related ftrace location is at least partly within |
1591 | * the given address range. That is, the first address of the instruction |
1592 | * that is either a NOP or call to the function tracer. It checks the ftrace |
1593 | * internal tables to determine if the address belongs or not. |
1594 | */ |
1595 | unsigned long ftrace_location_range(unsigned long start, unsigned long end) |
1596 | { |
1597 | struct dyn_ftrace *rec; |
1598 | |
1599 | rec = lookup_rec(start, end); |
1600 | if (rec) |
1601 | return rec->ip; |
1602 | |
1603 | return 0; |
1604 | } |
1605 | |
1606 | /** |
1607 | * ftrace_location - return the ftrace location |
1608 | * @ip: the instruction pointer to check |
1609 | * |
1610 | * Returns: |
1611 | * * If @ip matches the ftrace location, return @ip. |
1612 | * * If @ip matches sym+0, return sym's ftrace location. |
1613 | * * Otherwise, return 0. |
1614 | */ |
1615 | unsigned long ftrace_location(unsigned long ip) |
1616 | { |
1617 | struct dyn_ftrace *rec; |
1618 | unsigned long offset; |
1619 | unsigned long size; |
1620 | |
1621 | rec = lookup_rec(ip, ip); |
1622 | if (!rec) { |
1623 | if (!kallsyms_lookup_size_offset(ip, &size, &offset)) |
1624 | goto out; |
1625 | |
1626 | /* map sym+0 to __fentry__ */ |
1627 | if (!offset) |
1628 | rec = lookup_rec(ip, ip + size - 1); |
1629 | } |
1630 | |
1631 | if (rec) |
1632 | return rec->ip; |
1633 | |
1634 | out: |
1635 | return 0; |
1636 | } |
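/*
 * Example of the lookup rules above (addresses are made up): if my_func
 * starts at 0x8000 and its patched mcount/fentry site is the instruction at
 * 0x8004, then ftrace_location(0x8004) returns 0x8004, ftrace_location(0x8000)
 * also returns 0x8004 (sym+0 maps to the call site), and an address in the
 * middle of the function body returns 0.
 */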
1637 | |
1638 | /** |
1639 | * ftrace_text_reserved - return true if range contains an ftrace location |
1640 | * @start: start of range to search |
1641 | * @end: end of range to search (inclusive). @end points to the last byte to check. |
1642 | * |
1643 | * Returns: 1 if the range between @start and @end contains an ftrace location. |
1644 | * That is, the instruction that is either a NOP or call to |
1645 | * the function tracer. It checks the ftrace internal tables to |
1646 | * determine if the address belongs or not. |
1647 | */ |
1648 | int ftrace_text_reserved(const void *start, const void *end) |
1649 | { |
1650 | unsigned long ret; |
1651 | |
1652 | ret = ftrace_location_range((unsigned long)start, |
1653 | (unsigned long)end); |
1654 | |
1655 | return (int)!!ret; |
1656 | } |
1657 | |
1658 | /* Test if ops registered to this rec needs regs */ |
1659 | static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec) |
1660 | { |
1661 | struct ftrace_ops *ops; |
1662 | bool keep_regs = false; |
1663 | |
1664 | for (ops = ftrace_ops_list; |
1665 | ops != &ftrace_list_end; ops = ops->next) { |
1666 | /* pass rec in as regs to have non-NULL val */ |
1667 | if (ftrace_ops_test(ops, rec->ip, rec)) { |
1668 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { |
1669 | keep_regs = true; |
1670 | break; |
1671 | } |
1672 | } |
1673 | } |
1674 | |
1675 | return keep_regs; |
1676 | } |
1677 | |
1678 | static struct ftrace_ops * |
1679 | ftrace_find_tramp_ops_any(struct dyn_ftrace *rec); |
1680 | static struct ftrace_ops * |
1681 | ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude); |
1682 | static struct ftrace_ops * |
1683 | ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops); |
1684 | |
1685 | static bool skip_record(struct dyn_ftrace *rec) |
1686 | { |
1687 | /* |
1688 | * At boot up, weak functions are set to disable. Function tracing |
1689 | * can be enabled before they are, and they still need to be disabled now. |
1690 | * If the record is disabled, still continue if it is marked as already |
1691 | * enabled (this is needed to keep the accounting working). |
1692 | */ |
1693 | return rec->flags & FTRACE_FL_DISABLED && |
1694 | !(rec->flags & FTRACE_FL_ENABLED); |
1695 | } |
1696 | |
1697 | static bool __ftrace_hash_rec_update(struct ftrace_ops *ops, |
1698 | int filter_hash, |
1699 | bool inc) |
1700 | { |
1701 | struct ftrace_hash *hash; |
1702 | struct ftrace_hash *other_hash; |
1703 | struct ftrace_page *pg; |
1704 | struct dyn_ftrace *rec; |
1705 | bool update = false; |
1706 | int count = 0; |
1707 | int all = false; |
1708 | |
1709 | /* Only update if the ops has been registered */ |
1710 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) |
1711 | return false; |
1712 | |
1713 | /* |
1714 | * In the filter_hash case: |
1715 | * If the count is zero, we update all records. |
1716 | * Otherwise we just update the items in the hash. |
1717 | * |
1718 | * In the notrace_hash case: |
1719 | * We enable the update in the hash. |
1720 | * As disabling notrace means enabling the tracing, |
1721 | * and enabling notrace means disabling, the inc variable |
1722 | * gets inversed. |
1723 | */ |
1724 | if (filter_hash) { |
1725 | hash = ops->func_hash->filter_hash; |
1726 | other_hash = ops->func_hash->notrace_hash; |
1727 | if (ftrace_hash_empty(hash)) |
1728 | all = true; |
1729 | } else { |
1730 | inc = !inc; |
1731 | hash = ops->func_hash->notrace_hash; |
1732 | other_hash = ops->func_hash->filter_hash; |
1733 | /* |
1734 | * If the notrace hash has no items, |
1735 | * then there's nothing to do. |
1736 | */ |
1737 | if (ftrace_hash_empty(hash)) |
1738 | return false; |
1739 | } |
1740 | |
1741 | do_for_each_ftrace_rec(pg, rec) { |
1742 | int in_other_hash = 0; |
1743 | int in_hash = 0; |
1744 | int match = 0; |
1745 | |
1746 | if (skip_record(rec)) |
1747 | continue; |
1748 | |
1749 | if (all) { |
1750 | /* |
1751 | * Only the filter_hash affects all records. |
1752 | * Update if the record is not in the notrace hash. |
1753 | */ |
1754 | if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip)) |
1755 | match = 1; |
1756 | } else { |
1757 | in_hash = !!ftrace_lookup_ip(hash, rec->ip); |
1758 | in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip); |
1759 | |
1760 | /* |
1761 | * If filter_hash is set, we want to match all functions |
1762 | * that are in the hash but not in the other hash. |
1763 | * |
1764 | * If filter_hash is not set, then we are decrementing. |
1765 | * That means we match anything that is in the hash |
1766 | * and also in the other_hash. That is, we need to turn |
1767 | * off functions in the other hash because they are disabled |
1768 | * by this hash. |
1769 | */ |
1770 | if (filter_hash && in_hash && !in_other_hash) |
1771 | match = 1; |
1772 | else if (!filter_hash && in_hash && |
1773 | (in_other_hash || ftrace_hash_empty(other_hash))) |
1774 | match = 1; |
1775 | } |
1776 | if (!match) |
1777 | continue; |
1778 | |
1779 | if (inc) { |
1780 | rec->flags++; |
1781 | if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX)) |
1782 | return false; |
1783 | |
1784 | if (ops->flags & FTRACE_OPS_FL_DIRECT) |
1785 | rec->flags |= FTRACE_FL_DIRECT; |
1786 | |
1787 | /* |
1788 | * If there's only a single callback registered to a |
1789 | * function, and the ops has a trampoline registered |
1790 | * for it, then we can call it directly. |
1791 | */ |
1792 | if (ftrace_rec_count(rec) == 1 && ops->trampoline) |
1793 | rec->flags |= FTRACE_FL_TRAMP; |
1794 | else |
1795 | /* |
1796 | * If we are adding another function callback |
1797 | * to this function, and the previous had a |
1798 | * custom trampoline in use, then we need to go |
1799 | * back to the default trampoline. |
1800 | */ |
1801 | rec->flags &= ~FTRACE_FL_TRAMP; |
1802 | |
1803 | /* |
1804 | * If any ops wants regs saved for this function |
1805 | * then all ops will get saved regs. |
1806 | */ |
1807 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) |
1808 | rec->flags |= FTRACE_FL_REGS; |
1809 | } else { |
1810 | if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0)) |
1811 | return false; |
1812 | rec->flags--; |
1813 | |
1814 | /* |
1815 | * Only the internal direct_ops should have the |
1816 | * DIRECT flag set. Thus, if it is removing a |
1817 | * function, then that function should no longer |
1818 | * be direct. |
1819 | */ |
1820 | if (ops->flags & FTRACE_OPS_FL_DIRECT) |
1821 | rec->flags &= ~FTRACE_FL_DIRECT; |
1822 | |
1823 | /* |
1824 | * If the rec had REGS enabled and the ops that is |
1825 | * being removed had REGS set, then see if there is |
1826 | * still any ops for this record that wants regs. |
1827 | * If not, we can stop recording them. |
1828 | */ |
1829 | if (ftrace_rec_count(rec) > 0 && |
1830 | rec->flags & FTRACE_FL_REGS && |
1831 | ops->flags & FTRACE_OPS_FL_SAVE_REGS) { |
1832 | if (!test_rec_ops_needs_regs(rec)) |
1833 | rec->flags &= ~FTRACE_FL_REGS; |
1834 | } |
1835 | |
1836 | /* |
1837 | * The TRAMP flag needs to be set only if the rec count |
1838 | * is decremented to one, and the ops that is |
1839 | * left has a trampoline, since TRAMP can only be |
1840 | * enabled if there is only a single ops attached |
1841 | * to it. |
1842 | */ |
1843 | if (ftrace_rec_count(rec) == 1 && |
1844 | ftrace_find_tramp_ops_any_other(rec, ops)) |
1845 | rec->flags |= FTRACE_FL_TRAMP; |
1846 | else |
1847 | rec->flags &= ~FTRACE_FL_TRAMP; |
1848 | |
1849 | /* |
1850 | * flags will be cleared in ftrace_check_record() |
1851 | * if rec count is zero. |
1852 | */ |
1853 | } |
1854 | |
1855 | /* |
1856 | * If the rec has a single associated ops, and ops->func can be |
1857 | * called directly, allow the call site to call via the ops. |
1858 | */ |
1859 | if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) && |
1860 | ftrace_rec_count(rec) == 1 && |
1861 | ftrace_ops_get_func(ops) == ops->func) |
1862 | rec->flags |= FTRACE_FL_CALL_OPS; |
1863 | else |
1864 | rec->flags &= ~FTRACE_FL_CALL_OPS; |
1865 | |
1866 | count++; |
1867 | |
1868 | /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */ |
1869 | update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE; |
1870 | |
1871 | /* Shortcut, if we handled all records, we are done. */ |
1872 | if (!all && count == hash->count) |
1873 | return update; |
1874 | } while_for_each_ftrace_rec(); |
1875 | |
1876 | return update; |
1877 | } |
1878 | |
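| /* |
|  * Convenience wrappers around __ftrace_hash_rec_update(). The boolean |
|  * return value reports whether any dyn_ftrace record changed state, |
|  * i.e. whether the caller needs to issue an FTRACE_UPDATE_CALLS pass. |
|  */ |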
1879 | static bool ftrace_hash_rec_disable(struct ftrace_ops *ops, |
1880 | int filter_hash) |
1881 | { |
1882 | return __ftrace_hash_rec_update(ops, filter_hash, 0); |
1883 | } |
1884 | |
1885 | static bool ftrace_hash_rec_enable(struct ftrace_ops *ops, |
1886 | int filter_hash) |
1887 | { |
1888 | return __ftrace_hash_rec_update(ops, filter_hash, 1); |
1889 | } |
1890 | |
1891 | static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops, |
1892 | int filter_hash, int inc) |
1893 | { |
1894 | struct ftrace_ops *op; |
1895 | |
1896 | __ftrace_hash_rec_update(ops, filter_hash, inc); |
1897 | |
1898 | if (ops->func_hash != &global_ops.local_hash) |
1899 | return; |
1900 | |
1901 | /* |
1902 | * If the ops shares the global_ops hash, then we need to update |
1903 | * all ops that are enabled and use this hash. |
1904 | */ |
1905 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
1906 | /* Already done */ |
1907 | if (op == ops) |
1908 | continue; |
1909 | if (op->func_hash == &global_ops.local_hash) |
1910 | __ftrace_hash_rec_update(op, filter_hash, inc); |
1911 | } while_for_each_ftrace_op(op); |
1912 | } |
1913 | |
1914 | static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, |
1915 | int filter_hash) |
1916 | { |
1917 | ftrace_hash_rec_update_modify(ops, filter_hash, 0); |
1918 | } |
1919 | |
1920 | static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, |
1921 | int filter_hash) |
1922 | { |
1923 | ftrace_hash_rec_update_modify(ops, filter_hash, 1); |
1924 | } |
1925 | |
1926 | /* |
1927 | * Try to update the IPMODIFY flag on each ftrace_rec. Return 0 if it is OK |
1928 | * or no update is needed, -EBUSY if it detects a conflict of the flag |
1929 | * on an ftrace_rec, and -EINVAL if the new_hash tries to trace all recs. |
1930 | * Note that old_hash and new_hash have the following meanings: |
1931 | * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected) |
1932 | * - If the hash is EMPTY_HASH, it hits nothing |
1933 | * - Anything else hits the recs which match the hash entries. |
1934 | * |
1935 | * A DIRECT ops does not have the IPMODIFY flag, but we still need to check it |
1936 | * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call |
1937 | * ops_func(SHARE_IPMODIFY_SELF) to make sure current ops can share with |
1938 | * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate |
1939 | * the return value to the caller and eventually to the owner of the DIRECT |
1940 | * ops. |
1941 | */ |
1942 | static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops, |
1943 | struct ftrace_hash *old_hash, |
1944 | struct ftrace_hash *new_hash) |
1945 | { |
1946 | struct ftrace_page *pg; |
1947 | struct dyn_ftrace *rec, *end = NULL; |
1948 | int in_old, in_new; |
1949 | bool is_ipmodify, is_direct; |
1950 | |
1951 | /* Only update if the ops has been registered */ |
1952 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) |
1953 | return 0; |
1954 | |
1955 | is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY; |
1956 | is_direct = ops->flags & FTRACE_OPS_FL_DIRECT; |
1957 | |
1958 | /* neither IPMODIFY nor DIRECT, skip */ |
1959 | if (!is_ipmodify && !is_direct) |
1960 | return 0; |
1961 | |
1962 | if (WARN_ON_ONCE(is_ipmodify && is_direct)) |
1963 | return 0; |
1964 | |
1965 | /* |
1966 | * Since the IPMODIFY and DIRECT are very address sensitive |
1967 | * actions, we do not allow ftrace_ops to set all functions to new |
1968 | * hash. |
1969 | */ |
1970 | if (!new_hash || !old_hash) |
1971 | return -EINVAL; |
1972 | |
1973 | /* Update rec->flags */ |
1974 | do_for_each_ftrace_rec(pg, rec) { |
1975 | |
1976 | if (rec->flags & FTRACE_FL_DISABLED) |
1977 | continue; |
1978 | |
1979 | /* We need to update only differences of filter_hash */ |
1980 | in_old = !!ftrace_lookup_ip(old_hash, rec->ip); |
1981 | in_new = !!ftrace_lookup_ip(new_hash, rec->ip); |
1982 | if (in_old == in_new) |
1983 | continue; |
1984 | |
1985 | if (in_new) { |
1986 | if (rec->flags & FTRACE_FL_IPMODIFY) { |
1987 | int ret; |
1988 | |
1989 | /* Cannot have two ipmodify on same rec */ |
1990 | if (is_ipmodify) |
1991 | goto rollback; |
1992 | |
1993 | FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT); |
1994 | |
1995 | /* |
1996 | * Another ops with IPMODIFY is already |
1997 | * attached. We are now attaching a direct |
1998 | * ops. Run SHARE_IPMODIFY_SELF, to check |
1999 | * whether sharing is supported. |
2000 | */ |
2001 | if (!ops->ops_func) |
2002 | return -EBUSY; |
2003 | ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF); |
2004 | if (ret) |
2005 | return ret; |
2006 | } else if (is_ipmodify) { |
2007 | rec->flags |= FTRACE_FL_IPMODIFY; |
2008 | } |
2009 | } else if (is_ipmodify) { |
2010 | rec->flags &= ~FTRACE_FL_IPMODIFY; |
2011 | } |
2012 | } while_for_each_ftrace_rec(); |
2013 | |
2014 | return 0; |
2015 | |
2016 | rollback: |
2017 | end = rec; |
2018 | |
2019 | /* Roll back what we did above */ |
2020 | do_for_each_ftrace_rec(pg, rec) { |
2021 | |
2022 | if (rec->flags & FTRACE_FL_DISABLED) |
2023 | continue; |
2024 | |
2025 | if (rec == end) |
2026 | goto err_out; |
2027 | |
2028 | in_old = !!ftrace_lookup_ip(old_hash, rec->ip); |
2029 | in_new = !!ftrace_lookup_ip(new_hash, rec->ip); |
2030 | if (in_old == in_new) |
2031 | continue; |
2032 | |
2033 | if (in_new) |
2034 | rec->flags &= ~FTRACE_FL_IPMODIFY; |
2035 | else |
2036 | rec->flags |= FTRACE_FL_IPMODIFY; |
2037 | } while_for_each_ftrace_rec(); |
2038 | |
2039 | err_out: |
2040 | return -EBUSY; |
2041 | } |
2042 | |
2043 | static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops) |
2044 | { |
2045 | struct ftrace_hash *hash = ops->func_hash->filter_hash; |
2046 | |
2047 | if (ftrace_hash_empty(hash)) |
2048 | hash = NULL; |
2049 | |
2050 | return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash); |
2051 | } |
2052 | |
2053 | /* Disabling always succeeds */ |
2054 | static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops) |
2055 | { |
2056 | struct ftrace_hash *hash = ops->func_hash->filter_hash; |
2057 | |
2058 | if (ftrace_hash_empty(hash)) |
2059 | hash = NULL; |
2060 | |
2061 | __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH); |
2062 | } |
2063 | |
2064 | static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, |
2065 | struct ftrace_hash *new_hash) |
2066 | { |
2067 | struct ftrace_hash *old_hash = ops->func_hash->filter_hash; |
2068 | |
2069 | if (ftrace_hash_empty(old_hash)) |
2070 | old_hash = NULL; |
2071 | |
2072 | if (ftrace_hash_empty(new_hash)) |
2073 | new_hash = NULL; |
2074 | |
2075 | return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash); |
2076 | } |
2077 | |
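| /* |
|  * Dump the instruction bytes at a kernel address for the ftrace_bug() |
|  * report. The bytes are read with copy_from_kernel_nofault() so a bad |
|  * pointer results in a "[FAULT]" message instead of a crash. |
|  */ |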
2078 | static void print_ip_ins(const char *fmt, const unsigned char *p) |
2079 | { |
2080 | char ins[MCOUNT_INSN_SIZE]; |
2081 | |
2082 | if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) { |
2083 | printk(KERN_CONT "%s[FAULT] %px\n", fmt, p); |
2084 | return; |
2085 | } |
2086 | |
2087 | printk(KERN_CONT "%s", fmt); |
2088 | pr_cont("%*phC", MCOUNT_INSN_SIZE, ins); |
2089 | } |
2090 | |
2091 | enum ftrace_bug_type ftrace_bug_type; |
2092 | const void *ftrace_expected; |
2093 | |
2094 | static void print_bug_type(void) |
2095 | { |
2096 | switch (ftrace_bug_type) { |
2097 | case FTRACE_BUG_UNKNOWN: |
2098 | break; |
2099 | case FTRACE_BUG_INIT: |
2100 | pr_info("Initializing ftrace call sites\n"); |
2101 | break; |
2102 | case FTRACE_BUG_NOP: |
2103 | pr_info("Setting ftrace call site to NOP\n"); |
2104 | break; |
2105 | case FTRACE_BUG_CALL: |
2106 | pr_info("Setting ftrace call site to call ftrace function\n"); |
2107 | break; |
2108 | case FTRACE_BUG_UPDATE: |
2109 | pr_info("Updating ftrace call site to call a different ftrace function\n"); |
2110 | break; |
2111 | } |
2112 | } |
2113 | |
2114 | /** |
2115 | * ftrace_bug - report and shutdown function tracer |
2116 | * @failed: The failed type (EFAULT, EINVAL, EPERM) |
2117 | * @rec: The record that failed |
2118 | * |
2119 | * The arch code that enables or disables the function tracing |
2120 | * can call ftrace_bug() when it has detected a problem in |
2121 | * modifying the code. @failed should be one of either: |
2122 | * EFAULT - if the problem happens on reading the @ip address |
2123 | * EINVAL - if what is read at @ip is not what was expected |
2124 | * EPERM - if the problem happens on writing to the @ip address |
2125 | */ |
2126 | void ftrace_bug(int failed, struct dyn_ftrace *rec) |
2127 | { |
2128 | unsigned long ip = rec ? rec->ip : 0; |
2129 | |
2130 | pr_info("------------[ ftrace bug ]------------\n"); |
2131 | |
2132 | switch (failed) { |
2133 | case -EFAULT: |
2134 | pr_info("ftrace faulted on modifying "); |
2135 | print_ip_sym(KERN_INFO, ip); |
2136 | break; |
2137 | case -EINVAL: |
2138 | pr_info("ftrace failed to modify "); |
2139 | print_ip_sym(KERN_INFO, ip); |
2140 | print_ip_ins(fmt: " actual: ", p: (unsigned char *)ip); |
2141 | pr_cont("\n"); |
2142 | if (ftrace_expected) { |
2143 | print_ip_ins(fmt: " expected: ", p: ftrace_expected); |
2144 | pr_cont("\n"); |
2145 | } |
2146 | break; |
2147 | case -EPERM: |
2148 | pr_info("ftrace faulted on writing "); |
2149 | print_ip_sym(KERN_INFO, ip); |
2150 | break; |
2151 | default: |
2152 | pr_info("ftrace faulted on unknown error "); |
2153 | print_ip_sym(KERN_INFO, ip); |
2154 | } |
2155 | print_bug_type(); |
2156 | if (rec) { |
2157 | struct ftrace_ops *ops = NULL; |
2158 | |
2159 | pr_info("ftrace record flags: %lx\n", rec->flags); |
2160 | pr_cont(" (%ld)%s%s", ftrace_rec_count(rec), |
2161 | rec->flags & FTRACE_FL_REGS ? " R": " ", |
2162 | rec->flags & FTRACE_FL_CALL_OPS ? " O": " "); |
2163 | if (rec->flags & FTRACE_FL_TRAMP_EN) { |
2164 | ops = ftrace_find_tramp_ops_any(rec); |
2165 | if (ops) { |
2166 | do { |
2167 | pr_cont("\ttramp: %pS (%pS)", |
2168 | (void *)ops->trampoline, |
2169 | (void *)ops->func); |
2170 | ops = ftrace_find_tramp_ops_next(rec, ops); |
2171 | } while (ops); |
2172 | } else |
2173 | pr_cont("\ttramp: ERROR!"); |
2174 | |
2175 | } |
2176 | ip = ftrace_get_addr_curr(rec); |
2177 | pr_cont("\n expected tramp: %lx\n", ip); |
2178 | } |
2179 | |
2180 | FTRACE_WARN_ON_ONCE(1); |
2181 | } |
2182 | |
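| /* |
|  * Core decision logic for a single call site: compare the desired state |
|  * (derived from the record's ref count and its REGS/TRAMP/DIRECT/CALL_OPS |
|  * flags) with the currently enabled state and return one of the |
|  * FTRACE_UPDATE_* codes. When @update is true, the matching *_EN flags |
|  * are also brought in sync; ftrace_test_record() passes false to only query. |
|  */ |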
2183 | static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update) |
2184 | { |
2185 | unsigned long flag = 0UL; |
2186 | |
2187 | ftrace_bug_type = FTRACE_BUG_UNKNOWN; |
2188 | |
2189 | if (skip_record(rec)) |
2190 | return FTRACE_UPDATE_IGNORE; |
2191 | |
2192 | /* |
2193 | * If we are updating calls: |
2194 | * |
2195 | * If the record has a ref count, then we need to enable it |
2196 | * because someone is using it. |
2197 | * |
2198 | * Otherwise we make sure it's disabled. |
2199 | * |
2200 | * If we are disabling calls, then disable all records that |
2201 | * are enabled. |
2202 | */ |
2203 | if (enable && ftrace_rec_count(rec)) |
2204 | flag = FTRACE_FL_ENABLED; |
2205 | |
2206 | /* |
2207 | * If enabling and the REGS flag does not match the REGS_EN, or |
2208 | * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore |
2209 | * this record. Set flags to fail the compare against ENABLED. |
2210 | * Same for direct calls. |
2211 | */ |
2212 | if (flag) { |
2213 | if (!(rec->flags & FTRACE_FL_REGS) != |
2214 | !(rec->flags & FTRACE_FL_REGS_EN)) |
2215 | flag |= FTRACE_FL_REGS; |
2216 | |
2217 | if (!(rec->flags & FTRACE_FL_TRAMP) != |
2218 | !(rec->flags & FTRACE_FL_TRAMP_EN)) |
2219 | flag |= FTRACE_FL_TRAMP; |
2220 | |
2221 | /* |
2222 | * Direct calls are special, as count matters. |
2223 | * We must test the record for direct, if the |
2224 | * DIRECT and DIRECT_EN do not match, but only |
2225 | * if the count is 1. That's because, if the |
2226 | * count is something other than one, we do not |
2227 | * want the direct enabled (it will be done via the |
2228 | * direct helper). But if DIRECT_EN is set, and |
2229 | * the count is not one, we need to clear it. |
2230 | * |
2231 | */ |
2232 | if (ftrace_rec_count(rec) == 1) { |
2233 | if (!(rec->flags & FTRACE_FL_DIRECT) != |
2234 | !(rec->flags & FTRACE_FL_DIRECT_EN)) |
2235 | flag |= FTRACE_FL_DIRECT; |
2236 | } else if (rec->flags & FTRACE_FL_DIRECT_EN) { |
2237 | flag |= FTRACE_FL_DIRECT; |
2238 | } |
2239 | |
2240 | /* |
2241 | * Ops calls are special, as count matters. |
2242 | * As with direct calls, they must only be enabled when count |
2243 | * is one, otherwise they'll be handled via the list ops. |
2244 | */ |
2245 | if (ftrace_rec_count(rec) == 1) { |
2246 | if (!(rec->flags & FTRACE_FL_CALL_OPS) != |
2247 | !(rec->flags & FTRACE_FL_CALL_OPS_EN)) |
2248 | flag |= FTRACE_FL_CALL_OPS; |
2249 | } else if (rec->flags & FTRACE_FL_CALL_OPS_EN) { |
2250 | flag |= FTRACE_FL_CALL_OPS; |
2251 | } |
2252 | } |
2253 | |
2254 | /* If the state of this record hasn't changed, then do nothing */ |
2255 | if ((rec->flags & FTRACE_FL_ENABLED) == flag) |
2256 | return FTRACE_UPDATE_IGNORE; |
2257 | |
2258 | if (flag) { |
2259 | /* Save off if rec is being enabled (for return value) */ |
2260 | flag ^= rec->flags & FTRACE_FL_ENABLED; |
2261 | |
2262 | if (update) { |
2263 | rec->flags |= FTRACE_FL_ENABLED | FTRACE_FL_TOUCHED; |
2264 | if (flag & FTRACE_FL_REGS) { |
2265 | if (rec->flags & FTRACE_FL_REGS) |
2266 | rec->flags |= FTRACE_FL_REGS_EN; |
2267 | else |
2268 | rec->flags &= ~FTRACE_FL_REGS_EN; |
2269 | } |
2270 | if (flag & FTRACE_FL_TRAMP) { |
2271 | if (rec->flags & FTRACE_FL_TRAMP) |
2272 | rec->flags |= FTRACE_FL_TRAMP_EN; |
2273 | else |
2274 | rec->flags &= ~FTRACE_FL_TRAMP_EN; |
2275 | } |
2276 | |
2277 | /* Keep track of anything that modifies the function */ |
2278 | if (rec->flags & (FTRACE_FL_DIRECT | FTRACE_FL_IPMODIFY)) |
2279 | rec->flags |= FTRACE_FL_MODIFIED; |
2280 | |
2281 | if (flag & FTRACE_FL_DIRECT) { |
2282 | /* |
2283 | * If there's only one user (direct_ops helper) |
2284 | * then we can call the direct function |
2285 | * directly (no ftrace trampoline). |
2286 | */ |
2287 | if (ftrace_rec_count(rec) == 1) { |
2288 | if (rec->flags & FTRACE_FL_DIRECT) |
2289 | rec->flags |= FTRACE_FL_DIRECT_EN; |
2290 | else |
2291 | rec->flags &= ~FTRACE_FL_DIRECT_EN; |
2292 | } else { |
2293 | /* |
2294 | * Can only call directly if there's |
2295 | * only one callback to the function. |
2296 | */ |
2297 | rec->flags &= ~FTRACE_FL_DIRECT_EN; |
2298 | } |
2299 | } |
2300 | |
2301 | if (flag & FTRACE_FL_CALL_OPS) { |
2302 | if (ftrace_rec_count(rec) == 1) { |
2303 | if (rec->flags & FTRACE_FL_CALL_OPS) |
2304 | rec->flags |= FTRACE_FL_CALL_OPS_EN; |
2305 | else |
2306 | rec->flags &= ~FTRACE_FL_CALL_OPS_EN; |
2307 | } else { |
2308 | /* |
2309 | * Can only call directly if there's |
2310 | * only one set of associated ops. |
2311 | */ |
2312 | rec->flags &= ~FTRACE_FL_CALL_OPS_EN; |
2313 | } |
2314 | } |
2315 | } |
2316 | |
2317 | /* |
2318 | * If this record is being updated from a nop, then |
2319 | * return UPDATE_MAKE_CALL. |
2320 | * Otherwise, |
2321 | * return UPDATE_MODIFY_CALL to tell the caller to convert |
2322 | * from the save regs, to a non-save regs function or |
2323 | * vice versa, or from a trampoline call. |
2324 | */ |
2325 | if (flag & FTRACE_FL_ENABLED) { |
2326 | ftrace_bug_type = FTRACE_BUG_CALL; |
2327 | return FTRACE_UPDATE_MAKE_CALL; |
2328 | } |
2329 | |
2330 | ftrace_bug_type = FTRACE_BUG_UPDATE; |
2331 | return FTRACE_UPDATE_MODIFY_CALL; |
2332 | } |
2333 | |
2334 | if (update) { |
2335 | /* If there's no more users, clear all flags */ |
2336 | if (!ftrace_rec_count(rec)) |
2337 | rec->flags &= FTRACE_NOCLEAR_FLAGS; |
2338 | else |
2339 | /* |
2340 | * Just disable the record, but keep the ops TRAMP |
2341 | * and REGS states. The _EN flags must be disabled though. |
2342 | */ |
2343 | rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | |
2344 | FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN | |
2345 | FTRACE_FL_CALL_OPS_EN); |
2346 | } |
2347 | |
2348 | ftrace_bug_type = FTRACE_BUG_NOP; |
2349 | return FTRACE_UPDATE_MAKE_NOP; |
2350 | } |
2351 | |
2352 | /** |
2353 | * ftrace_update_record - set a record that now is tracing or not |
2354 | * @rec: the record to update |
2355 | * @enable: set to true if the record is tracing, false to force disable |
2356 | * |
2357 | * The records that represent all functions that can be traced need |
2358 | * to be updated when tracing has been enabled. |
2359 | */ |
2360 | int ftrace_update_record(struct dyn_ftrace *rec, bool enable) |
2361 | { |
2362 | return ftrace_check_record(rec, enable, true); |
2363 | } |
2364 | |
2365 | /** |
2366 | * ftrace_test_record - check if the record has been enabled or not |
2367 | * @rec: the record to test |
2368 | * @enable: set to true to check if enabled, false if it is disabled |
2369 | * |
2370 | * The arch code may need to test if a record is already set to |
2371 | * tracing to determine how to modify the function code that it |
2372 | * represents. |
2373 | */ |
2374 | int ftrace_test_record(struct dyn_ftrace *rec, bool enable) |
2375 | { |
2376 | return ftrace_check_record(rec, enable, false); |
2377 | } |
2378 | |
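| /* |
|  * The ftrace_find_tramp_ops_*() helpers below walk ftrace_ops_list looking |
|  * for an ops with a trampoline whose hash covers the record's ip; they |
|  * differ in which ops are excluded and where the search starts. |
|  */ |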
2379 | static struct ftrace_ops * |
2380 | ftrace_find_tramp_ops_any(struct dyn_ftrace *rec) |
2381 | { |
2382 | struct ftrace_ops *op; |
2383 | unsigned long ip = rec->ip; |
2384 | |
2385 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
2386 | |
2387 | if (!op->trampoline) |
2388 | continue; |
2389 | |
2390 | if (hash_contains_ip(ip, op->func_hash)) |
2391 | return op; |
2392 | } while_for_each_ftrace_op(op); |
2393 | |
2394 | return NULL; |
2395 | } |
2396 | |
2397 | static struct ftrace_ops * |
2398 | ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude) |
2399 | { |
2400 | struct ftrace_ops *op; |
2401 | unsigned long ip = rec->ip; |
2402 | |
2403 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
2404 | |
2405 | if (op == op_exclude || !op->trampoline) |
2406 | continue; |
2407 | |
2408 | if (hash_contains_ip(ip, op->func_hash)) |
2409 | return op; |
2410 | } while_for_each_ftrace_op(op); |
2411 | |
2412 | return NULL; |
2413 | } |
2414 | |
2415 | static struct ftrace_ops * |
2416 | ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, |
2417 | struct ftrace_ops *op) |
2418 | { |
2419 | unsigned long ip = rec->ip; |
2420 | |
2421 | while_for_each_ftrace_op(op) { |
2422 | |
2423 | if (!op->trampoline) |
2424 | continue; |
2425 | |
2426 | if (hash_contains_ip(ip, op->func_hash)) |
2427 | return op; |
2428 | } |
2429 | |
2430 | return NULL; |
2431 | } |
2432 | |
2433 | static struct ftrace_ops * |
2434 | ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) |
2435 | { |
2436 | struct ftrace_ops *op; |
2437 | unsigned long ip = rec->ip; |
2438 | |
2439 | /* |
2440 | * Need to check removed ops first. |
2441 | * If they are being removed, and this rec has a tramp, |
2442 | * and this rec is in the ops list, then it would be the |
2443 | * one with the tramp. |
2444 | */ |
2445 | if (removed_ops) { |
2446 | if (hash_contains_ip(ip, &removed_ops->old_hash)) |
2447 | return removed_ops; |
2448 | } |
2449 | |
2450 | /* |
2451 | * Need to find the current trampoline for a rec. |
2452 | * Now, a trampoline is only attached to a rec if there |
2453 | * was a single 'ops' attached to it. But this can be called |
2454 | * when we are adding another op to the rec or removing the |
2455 | * current one. Thus, if the op is being added, we can |
2456 | * ignore it because it hasn't attached itself to the rec |
2457 | * yet. |
2458 | * |
2459 | * If an ops is being modified (hooking to different functions) |
2460 | * then we don't care about the new functions that are being |
2461 | * added, just the old ones (that are probably being removed). |
2462 | * |
2463 | * If we are adding an ops to a function that is already using |
2464 | * a trampoline, that trampoline needs to be removed (trampolines |
2465 | * are only for a single attached ops), so an ops that is not being |
2466 | * modified also needs to be checked. |
2467 | */ |
2468 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
2469 | |
2470 | if (!op->trampoline) |
2471 | continue; |
2472 | |
2473 | /* |
2474 | * If the ops is being added, it hasn't gotten to |
2475 | * the point to be removed from this tree yet. |
2476 | */ |
2477 | if (op->flags & FTRACE_OPS_FL_ADDING) |
2478 | continue; |
2479 | |
2480 | |
2481 | /* |
2482 | * If the ops is being modified and is in the old |
2483 | * hash, then it is probably being removed from this |
2484 | * function. |
2485 | */ |
2486 | if ((op->flags & FTRACE_OPS_FL_MODIFYING) && |
2487 | hash_contains_ip(ip, &op->old_hash)) |
2488 | return op; |
2489 | /* |
2490 | * If the ops is not being added or modified, and it's |
2491 | * in its normal filter hash, then this must be the one |
2492 | * we want! |
2493 | */ |
2494 | if (!(op->flags & FTRACE_OPS_FL_MODIFYING) && |
2495 | hash_contains_ip(ip, op->func_hash)) |
2496 | return op; |
2497 | |
2498 | } while_for_each_ftrace_op(op); |
2499 | |
2500 | return NULL; |
2501 | } |
2502 | |
2503 | static struct ftrace_ops * |
2504 | ftrace_find_tramp_ops_new(struct dyn_ftrace *rec) |
2505 | { |
2506 | struct ftrace_ops *op; |
2507 | unsigned long ip = rec->ip; |
2508 | |
2509 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
2510 | /* pass rec in as regs to have non-NULL val */ |
2511 | if (hash_contains_ip(ip, op->func_hash)) |
2512 | return op; |
2513 | } while_for_each_ftrace_op(op); |
2514 | |
2515 | return NULL; |
2516 | } |
2517 | |
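| /* |
|  * Return the single ftrace_ops whose hash covers this record, or NULL |
|  * if no ops, or more than one ops, traces it. |
|  */ |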
2518 | struct ftrace_ops * |
2519 | ftrace_find_unique_ops(struct dyn_ftrace *rec) |
2520 | { |
2521 | struct ftrace_ops *op, *found = NULL; |
2522 | unsigned long ip = rec->ip; |
2523 | |
2524 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
2525 | |
2526 | if (hash_contains_ip(ip, op->func_hash)) { |
2527 | if (found) |
2528 | return NULL; |
2529 | found = op; |
2530 | } |
2531 | |
2532 | } while_for_each_ftrace_op(op); |
2533 | |
2534 | return found; |
2535 | } |
2536 | |
2537 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS |
2538 | /* Protected by rcu_tasks for reading, and direct_mutex for writing */ |
2539 | static struct ftrace_hash __rcu *direct_functions = EMPTY_HASH; |
2540 | static DEFINE_MUTEX(direct_mutex); |
2541 | int ftrace_direct_func_count; |
2542 | |
2543 | /* |
2544 | * Search the direct_functions hash to see if the given instruction pointer |
2545 | * has a direct caller attached to it. |
2546 | */ |
2547 | unsigned long ftrace_find_rec_direct(unsigned long ip) |
2548 | { |
2549 | struct ftrace_func_entry *entry; |
2550 | |
2551 | entry = __ftrace_lookup_ip(direct_functions, ip); |
2552 | if (!entry) |
2553 | return 0; |
2554 | |
2555 | return entry->direct; |
2556 | } |
2557 | |
2558 | static void call_direct_funcs(unsigned long ip, unsigned long pip, |
2559 | struct ftrace_ops *ops, struct ftrace_regs *fregs) |
2560 | { |
2561 | unsigned long addr = READ_ONCE(ops->direct_call); |
2562 | |
2563 | if (!addr) |
2564 | return; |
2565 | |
2566 | arch_ftrace_set_direct_caller(fregs, addr); |
2567 | } |
2568 | #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ |
2569 | |
2570 | /** |
2571 | * ftrace_get_addr_new - Get the call address to set to |
2572 | * @rec: The ftrace record descriptor |
2573 | * |
2574 | * If the record has the FTRACE_FL_REGS set, that means that it |
2575 | * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS |
2576 | * is not set, then it wants to convert to the normal callback. |
2577 | * |
2578 | * Returns: the address of the trampoline to set to |
2579 | */ |
2580 | unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) |
2581 | { |
2582 | struct ftrace_ops *ops; |
2583 | unsigned long addr; |
2584 | |
2585 | if ((rec->flags & FTRACE_FL_DIRECT) && |
2586 | (ftrace_rec_count(rec) == 1)) { |
2587 | addr = ftrace_find_rec_direct(rec->ip); |
2588 | if (addr) |
2589 | return addr; |
2590 | WARN_ON_ONCE(1); |
2591 | } |
2592 | |
2593 | /* Trampolines take precedence over regs */ |
2594 | if (rec->flags & FTRACE_FL_TRAMP) { |
2595 | ops = ftrace_find_tramp_ops_new(rec); |
2596 | if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { |
2597 | pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n", |
2598 | (void *)rec->ip, (void *)rec->ip, rec->flags); |
2599 | /* Ftrace is shutting down, return anything */ |
2600 | return (unsigned long)FTRACE_ADDR; |
2601 | } |
2602 | return ops->trampoline; |
2603 | } |
2604 | |
2605 | if (rec->flags & FTRACE_FL_REGS) |
2606 | return (unsigned long)FTRACE_REGS_ADDR; |
2607 | else |
2608 | return (unsigned long)FTRACE_ADDR; |
2609 | } |
2610 | |
2611 | /** |
2612 | * ftrace_get_addr_curr - Get the call address that is already there |
2613 | * @rec: The ftrace record descriptor |
2614 | * |
2615 | * The FTRACE_FL_REGS_EN is set when the record already points to |
2616 | * a function that saves all the regs. Basically the '_EN' version |
2617 | * represents the current state of the function. |
2618 | * |
2619 | * Returns: the address of the trampoline that is currently being called |
2620 | */ |
2621 | unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec) |
2622 | { |
2623 | struct ftrace_ops *ops; |
2624 | unsigned long addr; |
2625 | |
2626 | /* Direct calls take precedence over trampolines */ |
2627 | if (rec->flags & FTRACE_FL_DIRECT_EN) { |
2628 | addr = ftrace_find_rec_direct(rec->ip); |
2629 | if (addr) |
2630 | return addr; |
2631 | WARN_ON_ONCE(1); |
2632 | } |
2633 | |
2634 | /* Trampolines take precedence over regs */ |
2635 | if (rec->flags & FTRACE_FL_TRAMP_EN) { |
2636 | ops = ftrace_find_tramp_ops_curr(rec); |
2637 | if (FTRACE_WARN_ON(!ops)) { |
2638 | pr_warn("Bad trampoline accounting at: %p (%pS)\n", |
2639 | (void *)rec->ip, (void *)rec->ip); |
2640 | /* Ftrace is shutting down, return anything */ |
2641 | return (unsigned long)FTRACE_ADDR; |
2642 | } |
2643 | return ops->trampoline; |
2644 | } |
2645 | |
2646 | if (rec->flags & FTRACE_FL_REGS_EN) |
2647 | return (unsigned long)FTRACE_REGS_ADDR; |
2648 | else |
2649 | return (unsigned long)FTRACE_ADDR; |
2650 | } |
2651 | |
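| /* |
|  * Update one call site: work out the address it should call next and the |
|  * address it currently calls, let ftrace_update_record() decide what needs |
|  * to happen, then hand the actual text modification to the arch via |
|  * ftrace_make_call(), ftrace_make_nop() or ftrace_modify_call(). |
|  */ |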
2652 | static int |
2653 | __ftrace_replace_code(struct dyn_ftrace *rec, bool enable) |
2654 | { |
2655 | unsigned long ftrace_old_addr; |
2656 | unsigned long ftrace_addr; |
2657 | int ret; |
2658 | |
2659 | ftrace_addr = ftrace_get_addr_new(rec); |
2660 | |
2661 | /* This needs to be done before we call ftrace_update_record */ |
2662 | ftrace_old_addr = ftrace_get_addr_curr(rec); |
2663 | |
2664 | ret = ftrace_update_record(rec, enable); |
2665 | |
2666 | ftrace_bug_type = FTRACE_BUG_UNKNOWN; |
2667 | |
2668 | switch (ret) { |
2669 | case FTRACE_UPDATE_IGNORE: |
2670 | return 0; |
2671 | |
2672 | case FTRACE_UPDATE_MAKE_CALL: |
2673 | ftrace_bug_type = FTRACE_BUG_CALL; |
2674 | return ftrace_make_call(rec, ftrace_addr); |
2675 | |
2676 | case FTRACE_UPDATE_MAKE_NOP: |
2677 | ftrace_bug_type = FTRACE_BUG_NOP; |
2678 | return ftrace_make_nop(NULL, rec, ftrace_old_addr); |
2679 | |
2680 | case FTRACE_UPDATE_MODIFY_CALL: |
2681 | ftrace_bug_type = FTRACE_BUG_UPDATE; |
2682 | return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); |
2683 | } |
2684 | |
2685 | return -1; /* unknown ftrace bug */ |
2686 | } |
2687 | |
2688 | void __weak ftrace_replace_code(int mod_flags) |
2689 | { |
2690 | struct dyn_ftrace *rec; |
2691 | struct ftrace_page *pg; |
2692 | bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL; |
2693 | int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL; |
2694 | int failed; |
2695 | |
2696 | if (unlikely(ftrace_disabled)) |
2697 | return; |
2698 | |
2699 | do_for_each_ftrace_rec(pg, rec) { |
2700 | |
2701 | if (skip_record(rec)) |
2702 | continue; |
2703 | |
2704 | failed = __ftrace_replace_code(rec, enable); |
2705 | if (failed) { |
2706 | ftrace_bug(failed, rec); |
2707 | /* Stop processing */ |
2708 | return; |
2709 | } |
2710 | if (schedulable) |
2711 | cond_resched(); |
2712 | } while_for_each_ftrace_rec(); |
2713 | } |
2714 | |
2715 | struct ftrace_rec_iter { |
2716 | struct ftrace_page *pg; |
2717 | int index; |
2718 | }; |
2719 | |
2720 | /** |
2721 | * ftrace_rec_iter_start - start up iterating over traced functions |
2722 | * |
2723 | * Returns: an iterator handle that is used to iterate over all |
2724 | * the records that represent address locations where functions |
2725 | * are traced. |
2726 | * |
2727 | * May return NULL if no records are available. |
2728 | */ |
2729 | struct ftrace_rec_iter *ftrace_rec_iter_start(void) |
2730 | { |
2731 | /* |
2732 | * We only use a single iterator. |
2733 | * Protected by the ftrace_lock mutex. |
2734 | */ |
2735 | static struct ftrace_rec_iter ftrace_rec_iter; |
2736 | struct ftrace_rec_iter *iter = &ftrace_rec_iter; |
2737 | |
2738 | iter->pg = ftrace_pages_start; |
2739 | iter->index = 0; |
2740 | |
2741 | /* Could have empty pages */ |
2742 | while (iter->pg && !iter->pg->index) |
2743 | iter->pg = iter->pg->next; |
2744 | |
2745 | if (!iter->pg) |
2746 | return NULL; |
2747 | |
2748 | return iter; |
2749 | } |
2750 | |
2751 | /** |
2752 | * ftrace_rec_iter_next - get the next record to process. |
2753 | * @iter: The handle to the iterator. |
2754 | * |
2755 | * Returns: the next iterator after the given iterator @iter. |
2756 | */ |
2757 | struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter) |
2758 | { |
2759 | iter->index++; |
2760 | |
2761 | if (iter->index >= iter->pg->index) { |
2762 | iter->pg = iter->pg->next; |
2763 | iter->index = 0; |
2764 | |
2765 | /* Could have empty pages */ |
2766 | while (iter->pg && !iter->pg->index) |
2767 | iter->pg = iter->pg->next; |
2768 | } |
2769 | |
2770 | if (!iter->pg) |
2771 | return NULL; |
2772 | |
2773 | return iter; |
2774 | } |
2775 | |
2776 | /** |
2777 | * ftrace_rec_iter_record - get the record at the iterator location |
2778 | * @iter: The current iterator location |
2779 | * |
2780 | * Returns: the record that the current @iter is at. |
2781 | */ |
2782 | struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter) |
2783 | { |
2784 | return &iter->pg->records[iter->index]; |
2785 | } |
2786 | |
2787 | static int |
2788 | ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec) |
2789 | { |
2790 | int ret; |
2791 | |
2792 | if (unlikely(ftrace_disabled)) |
2793 | return 0; |
2794 | |
2795 | ret = ftrace_init_nop(mod, rec); |
2796 | if (ret) { |
2797 | ftrace_bug_type = FTRACE_BUG_INIT; |
2798 | ftrace_bug(ret, rec); |
2799 | return 0; |
2800 | } |
2801 | return 1; |
2802 | } |
2803 | |
2804 | /* |
2805 | * archs can override this function if they must do something |
2806 | * before the code modification is performed. |
2807 | */ |
2808 | void __weak ftrace_arch_code_modify_prepare(void) |
2809 | { |
2810 | } |
2811 | |
2812 | /* |
2813 | * archs can override this function if they must do something |
2814 | * after the code modification is performed. |
2815 | */ |
2816 | void __weak ftrace_arch_code_modify_post_process(void) |
2817 | { |
2818 | } |
2819 | |
2820 | static int update_ftrace_func(ftrace_func_t func) |
2821 | { |
2822 | static ftrace_func_t save_func; |
2823 | |
2824 | /* Avoid updating if it hasn't changed */ |
2825 | if (func == save_func) |
2826 | return 0; |
2827 | |
2828 | save_func = func; |
2829 | |
2830 | return ftrace_update_ftrace_func(func); |
2831 | } |
2832 | |
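| /* |
|  * Apply a full set of FTRACE_* command bits: switch the traced function |
|  * to the list func while call sites are in flux, enable or disable the |
|  * call sites themselves, restore the real trace function, and start or |
|  * stop the function-graph return caller. |
|  */ |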
2833 | void ftrace_modify_all_code(int command) |
2834 | { |
2835 | int update = command & FTRACE_UPDATE_TRACE_FUNC; |
2836 | int mod_flags = 0; |
2837 | int err = 0; |
2838 | |
2839 | if (command & FTRACE_MAY_SLEEP) |
2840 | mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL; |
2841 | |
2842 | /* |
2843 | * If the ftrace_caller calls a ftrace_ops func directly, |
2844 | * we need to make sure that it only traces functions it |
2845 | * expects to trace. When doing the switch of functions, |
2846 | * we need to update to the ftrace_ops_list_func first |
2847 | * before the transition between old and new calls is set, |
2848 | * as the ftrace_ops_list_func will check the ops hashes |
2849 | * to make sure the ops are tracing the right |
2850 | * functions. |
2851 | */ |
2852 | if (update) { |
2853 | err = update_ftrace_func(ftrace_ops_list_func); |
2854 | if (FTRACE_WARN_ON(err)) |
2855 | return; |
2856 | } |
2857 | |
2858 | if (command & FTRACE_UPDATE_CALLS) |
2859 | ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL); |
2860 | else if (command & FTRACE_DISABLE_CALLS) |
2861 | ftrace_replace_code(mod_flags); |
2862 | |
2863 | if (update && ftrace_trace_function != ftrace_ops_list_func) { |
2864 | function_trace_op = set_function_trace_op; |
2865 | smp_wmb(); |
2866 | /* If irqs are disabled, we are in stop machine */ |
2867 | if (!irqs_disabled()) |
2868 | smp_call_function(ftrace_sync_ipi, NULL, 1); |
2869 | err = update_ftrace_func(ftrace_trace_function); |
2870 | if (FTRACE_WARN_ON(err)) |
2871 | return; |
2872 | } |
2873 | |
2874 | if (command & FTRACE_START_FUNC_RET) |
2875 | err = ftrace_enable_ftrace_graph_caller(); |
2876 | else if (command & FTRACE_STOP_FUNC_RET) |
2877 | err = ftrace_disable_ftrace_graph_caller(); |
2878 | FTRACE_WARN_ON(err); |
2879 | } |
2880 | |
2881 | static int __ftrace_modify_code(void *data) |
2882 | { |
2883 | int *command = data; |
2884 | |
2885 | ftrace_modify_all_code(*command); |
2886 | |
2887 | return 0; |
2888 | } |
2889 | |
2890 | /** |
2891 | * ftrace_run_stop_machine - go back to the stop machine method |
2892 | * @command: The command to tell ftrace what to do |
2893 | * |
2894 | * If an arch needs to fall back to the stop machine method, then |
2895 | * it can call this function. |
2896 | */ |
2897 | void ftrace_run_stop_machine(int command) |
2898 | { |
2899 | stop_machine(__ftrace_modify_code, &command, NULL); |
2900 | } |
2901 | |
2902 | /** |
2903 | * arch_ftrace_update_code - modify the code to trace or not trace |
2904 | * @command: The command that needs to be done |
2905 | * |
2906 | * Archs can override this function if they do not need to |
2907 | * run stop_machine() to modify code. |
2908 | */ |
2909 | void __weak arch_ftrace_update_code(int command) |
2910 | { |
2911 | ftrace_run_stop_machine(command); |
2912 | } |
2913 | |
2914 | static void ftrace_run_update_code(int command) |
2915 | { |
2916 | ftrace_arch_code_modify_prepare(); |
2917 | |
2918 | /* |
2919 | * By default we use stop_machine() to modify the code. |
2920 | * But archs can do whatever they want as long as it |
2921 | * is safe. The stop_machine() is the safest, but also |
2922 | * produces the most overhead. |
2923 | */ |
2924 | arch_ftrace_update_code(command); |
2925 | |
2926 | ftrace_arch_code_modify_post_process(); |
2927 | } |
2928 | |
2929 | static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, |
2930 | struct ftrace_ops_hash *old_hash) |
2931 | { |
2932 | ops->flags |= FTRACE_OPS_FL_MODIFYING; |
2933 | ops->old_hash.filter_hash = old_hash->filter_hash; |
2934 | ops->old_hash.notrace_hash = old_hash->notrace_hash; |
2935 | ftrace_run_update_code(command); |
2936 | ops->old_hash.filter_hash = NULL; |
2937 | ops->old_hash.notrace_hash = NULL; |
2938 | ops->flags &= ~FTRACE_OPS_FL_MODIFYING; |
2939 | } |
2940 | |
2941 | static ftrace_func_t saved_ftrace_func; |
2942 | static int ftrace_start_up; |
2943 | |
2944 | void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops) |
2945 | { |
2946 | } |
2947 | |
2948 | /* List of trace_ops that have allocated trampolines */ |
2949 | static LIST_HEAD(ftrace_ops_trampoline_list); |
2950 | |
2951 | static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops) |
2952 | { |
2953 | lockdep_assert_held(&ftrace_lock); |
2954 | list_add_rcu(&ops->list, &ftrace_ops_trampoline_list); |
2955 | } |
2956 | |
2957 | static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops) |
2958 | { |
2959 | lockdep_assert_held(&ftrace_lock); |
2960 | list_del_rcu(&ops->list); |
2961 | synchronize_rcu(); |
2962 | } |
2963 | |
2964 | /* |
2965 | * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols |
2966 | * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is |
2967 | * not a module. |
2968 | */ |
2969 | #define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace" |
2970 | #define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline" |
2971 | |
2972 | static void ftrace_trampoline_free(struct ftrace_ops *ops) |
2973 | { |
2974 | if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) && |
2975 | ops->trampoline) { |
2976 | /* |
2977 | * Record the text poke event before the ksymbol unregister |
2978 | * event. |
2979 | */ |
2980 | perf_event_text_poke((void *)ops->trampoline, |
2981 | (void *)ops->trampoline, |
2982 | ops->trampoline_size, NULL, 0); |
2983 | perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, |
2984 | ops->trampoline, ops->trampoline_size, |
2985 | true, FTRACE_TRAMPOLINE_SYM); |
2986 | /* Remove from kallsyms after the perf events */ |
2987 | ftrace_remove_trampoline_from_kallsyms(ops); |
2988 | } |
2989 | |
2990 | arch_ftrace_trampoline_free(ops); |
2991 | } |
2992 | |
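| /* |
|  * If the function used for tracing changed since the last update, fold |
|  * FTRACE_UPDATE_TRACE_FUNC into the command before running the code |
|  * modification (which is skipped entirely when ftrace is not enabled). |
|  */ |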
2993 | static void ftrace_startup_enable(int command) |
2994 | { |
2995 | if (saved_ftrace_func != ftrace_trace_function) { |
2996 | saved_ftrace_func = ftrace_trace_function; |
2997 | command |= FTRACE_UPDATE_TRACE_FUNC; |
2998 | } |
2999 | |
3000 | if (!command || !ftrace_enabled) |
3001 | return; |
3002 | |
3003 | ftrace_run_update_code(command); |
3004 | } |
3005 | |
3006 | static void ftrace_startup_all(int command) |
3007 | { |
3008 | update_all_ops = true; |
3009 | ftrace_startup_enable(command); |
3010 | update_all_ops = false; |
3011 | } |
3012 | |
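| /* |
|  * Register @ops and enable its call sites: add it to the ops list, mark it |
|  * ENABLED and ADDING, propagate any IPMODIFY constraint, account the records |
|  * its hashes select, and patch the affected call sites. If the IPMODIFY |
|  * update fails, the registration is rolled back. |
|  */ |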
3013 | int ftrace_startup(struct ftrace_ops *ops, int command) |
3014 | { |
3015 | int ret; |
3016 | |
3017 | if (unlikely(ftrace_disabled)) |
3018 | return -ENODEV; |
3019 | |
3020 | ret = __register_ftrace_function(ops); |
3021 | if (ret) |
3022 | return ret; |
3023 | |
3024 | ftrace_start_up++; |
3025 | |
3026 | /* |
3027 | * Note that ftrace probes use this to start up |
3028 | * and modify functions it will probe. But we still |
3029 | * set the ADDING flag for modification, as probes |
3030 | * do not have trampolines. If they add them in the |
3031 | * future, then the probes will need to distinguish |
3032 | * between adding and updating probes. |
3033 | */ |
3034 | ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING; |
3035 | |
3036 | ret = ftrace_hash_ipmodify_enable(ops); |
3037 | if (ret < 0) { |
3038 | /* Rollback registration process */ |
3039 | __unregister_ftrace_function(ops); |
3040 | ftrace_start_up--; |
3041 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; |
3042 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC) |
3043 | ftrace_trampoline_free(ops); |
3044 | return ret; |
3045 | } |
3046 | |
3047 | if (ftrace_hash_rec_enable(ops, 1)) |
3048 | command |= FTRACE_UPDATE_CALLS; |
3049 | |
3050 | ftrace_startup_enable(command); |
3051 | |
3052 | /* |
3053 | * If ftrace is in an undefined state, we just remove the ops from the list |
3054 | * to prevent a NULL pointer dereference, instead of totally rolling it back |
3055 | * and freeing the trampoline, because those actions could cause further damage. |
3056 | */ |
3057 | if (unlikely(ftrace_disabled)) { |
3058 | __unregister_ftrace_function(ops); |
3059 | return -ENODEV; |
3060 | } |
3061 | |
3062 | ops->flags &= ~FTRACE_OPS_FL_ADDING; |
3063 | |
3064 | return 0; |
3065 | } |
3066 | |
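| /* |
|  * The inverse of ftrace_startup(): unregister @ops, drop its record |
|  * accounting, patch the affected call sites back, and wait for any |
|  * in-flight callers before a dynamically allocated ops (and its |
|  * trampoline) can be freed. |
|  */ |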
3067 | int ftrace_shutdown(struct ftrace_ops *ops, int command) |
3068 | { |
3069 | int ret; |
3070 | |
3071 | if (unlikely(ftrace_disabled)) |
3072 | return -ENODEV; |
3073 | |
3074 | ret = __unregister_ftrace_function(ops); |
3075 | if (ret) |
3076 | return ret; |
3077 | |
3078 | ftrace_start_up--; |
3079 | /* |
3080 | * Just warn in case of unbalance; no need to kill ftrace, it's not |
3081 | * critical, but the ftrace_call callers may never be nopped again after |
3082 | * further ftrace uses. |
3083 | */ |
3084 | WARN_ON_ONCE(ftrace_start_up < 0); |
3085 | |
3086 | /* Disabling ipmodify never fails */ |
3087 | ftrace_hash_ipmodify_disable(ops); |
3088 | |
3089 | if (ftrace_hash_rec_disable(ops, 1)) |
3090 | command |= FTRACE_UPDATE_CALLS; |
3091 | |
3092 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; |
3093 | |
3094 | if (saved_ftrace_func != ftrace_trace_function) { |
3095 | saved_ftrace_func = ftrace_trace_function; |
3096 | command |= FTRACE_UPDATE_TRACE_FUNC; |
3097 | } |
3098 | |
3099 | if (!command || !ftrace_enabled) |
3100 | goto out; |
3101 | |
3102 | /* |
3103 | * If the ops uses a trampoline, then it needs to be |
3104 | * tested first on update. |
3105 | */ |
3106 | ops->flags |= FTRACE_OPS_FL_REMOVING; |
3107 | removed_ops = ops; |
3108 | |
3109 | /* The trampoline logic checks the old hashes */ |
3110 | ops->old_hash.filter_hash = ops->func_hash->filter_hash; |
3111 | ops->old_hash.notrace_hash = ops->func_hash->notrace_hash; |
3112 | |
3113 | ftrace_run_update_code(command); |
3114 | |
3115 | /* |
3116 | * If there's no more ops registered with ftrace, run a |
3117 | * sanity check to make sure all rec flags are cleared. |
3118 | */ |
3119 | if (rcu_dereference_protected(ftrace_ops_list, |
3120 | lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { |
3121 | struct ftrace_page *pg; |
3122 | struct dyn_ftrace *rec; |
3123 | |
3124 | do_for_each_ftrace_rec(pg, rec) { |
3125 | if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_NOCLEAR_FLAGS)) |
3126 | pr_warn(" %pS flags:%lx\n", |
3127 | (void *)rec->ip, rec->flags); |
3128 | } while_for_each_ftrace_rec(); |
3129 | } |
3130 | |
3131 | ops->old_hash.filter_hash = NULL; |
3132 | ops->old_hash.notrace_hash = NULL; |
3133 | |
3134 | removed_ops = NULL; |
3135 | ops->flags &= ~FTRACE_OPS_FL_REMOVING; |
3136 | |
3137 | out: |
3138 | /* |
3139 | * Dynamic ops may be freed, we must make sure that all |
3140 | * callers are done before leaving this function. |
3141 | */ |
3142 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC) { |
3143 | /* |
3144 | * We need to do a hard force of sched synchronization. |
3145 | * This is because we use preempt_disable() to do RCU, but |
3146 | * the function tracers can be called where RCU is not watching |
3147 | * (like before user_exit()). We cannot rely on the RCU |
3148 | * infrastructure to do the synchronization, thus we must do it |
3149 | * ourselves. |
3150 | */ |
3151 | synchronize_rcu_tasks_rude(); |
3152 | |
3153 | /* |
3154 | * When the kernel is preemptive, tasks can be preempted |
3155 | * while on a ftrace trampoline. Just scheduling a task on |
3156 | * a CPU is not good enough to flush them. Calling |
3157 | * synchronize_rcu_tasks() will wait for those tasks to |
3158 | * execute and either schedule voluntarily or enter user space. |
3159 | */ |
3160 | if (IS_ENABLED(CONFIG_PREEMPTION)) |
3161 | synchronize_rcu_tasks(); |
3162 | |
3163 | ftrace_trampoline_free(ops); |
3164 | } |
3165 | |
3166 | return 0; |
3167 | } |
3168 | |
3169 | static u64 ftrace_update_time; |
3170 | unsigned long ftrace_update_tot_cnt; |
3171 | unsigned long ftrace_number_of_pages; |
3172 | unsigned long ftrace_number_of_groups; |
3173 | |
3174 | static inline int ops_traces_mod(struct ftrace_ops *ops) |
3175 | { |
3176 | /* |
3177 | * An empty filter_hash will default to tracing the module. |
3178 | * But the notrace hash requires a test of individual module functions. |
3179 | */ |
3180 | return ftrace_hash_empty(ops->func_hash->filter_hash) && |
3181 | ftrace_hash_empty(ops->func_hash->notrace_hash); |
3182 | } |
3183 | |
3184 | static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) |
3185 | { |
3186 | bool init_nop = ftrace_need_init_nop(); |
3187 | struct ftrace_page *pg; |
3188 | struct dyn_ftrace *p; |
3189 | u64 start, stop; |
3190 | unsigned long update_cnt = 0; |
3191 | unsigned long rec_flags = 0; |
3192 | int i; |
3193 | |
3194 | start = ftrace_now(raw_smp_processor_id()); |
3195 | |
3196 | /* |
3197 | * When a module is loaded, this function is called to convert |
3198 | * the calls to mcount in its text to nops, and also to create |
3199 | * an entry in the ftrace data. Now, if ftrace is activated |
3200 | * after this call, but before the module sets its text to |
3201 | * read-only, the modification of enabling ftrace can fail if |
3202 | * the read-only is done while ftrace is converting the calls. |
3203 | * To prevent this, the module's records are set as disabled |
3204 | * and will be enabled after the call to set the module's text |
3205 | * to read-only. |
3206 | */ |
3207 | if (mod) |
3208 | rec_flags |= FTRACE_FL_DISABLED; |
3209 | |
3210 | for (pg = new_pgs; pg; pg = pg->next) { |
3211 | |
3212 | for (i = 0; i < pg->index; i++) { |
3213 | |
3214 | /* If something went wrong, bail without enabling anything */ |
3215 | if (unlikely(ftrace_disabled)) |
3216 | return -1; |
3217 | |
3218 | p = &pg->records[i]; |
3219 | p->flags = rec_flags; |
3220 | |
3221 | /* |
3222 | * Do the initial record conversion from mcount jump |
3223 | * to the NOP instructions. |
3224 | */ |
3225 | if (init_nop && !ftrace_nop_initialize(mod, p)) |
3226 | break; |
3227 | |
3228 | update_cnt++; |
3229 | } |
3230 | } |
3231 | |
3232 | stop = ftrace_now(raw_smp_processor_id()); |
3233 | ftrace_update_time = stop - start; |
3234 | ftrace_update_tot_cnt += update_cnt; |
3235 | |
3236 | return 0; |
3237 | } |
3238 | |
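| /* |
|  * Allocate the backing pages for a group of dyn_ftrace records. The |
|  * allocation order is sized from @count but shrunk on failure; the return |
|  * value is how many records actually fit (capped at @count), or a |
|  * negative errno. |
|  */ |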
3239 | static int ftrace_allocate_records(struct ftrace_page *pg, int count) |
3240 | { |
3241 | int order; |
3242 | int pages; |
3243 | int cnt; |
3244 | |
3245 | if (WARN_ON(!count)) |
3246 | return -EINVAL; |
3247 | |
3248 | /* We want to fill as much as possible, with no empty pages */ |
3249 | pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE); |
3250 | order = fls(pages) - 1; |
3251 | |
3252 | again: |
3253 | pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); |
3254 | |
3255 | if (!pg->records) { |
3256 | /* if we can't allocate this size, try something smaller */ |
3257 | if (!order) |
3258 | return -ENOMEM; |
3259 | order--; |
3260 | goto again; |
3261 | } |
3262 | |
3263 | ftrace_number_of_pages += 1 << order; |
3264 | ftrace_number_of_groups++; |
3265 | |
3266 | cnt = (PAGE_SIZE << order) / ENTRY_SIZE; |
3267 | pg->order = order; |
3268 | |
3269 | if (cnt > count) |
3270 | cnt = count; |
3271 | |
3272 | return cnt; |
3273 | } |
3274 | |
3275 | static void ftrace_free_pages(struct ftrace_page *pages) |
3276 | { |
3277 | struct ftrace_page *pg = pages; |
3278 | |
3279 | while (pg) { |
3280 | if (pg->records) { |
3281 | free_pages((unsigned long)pg->records, pg->order); |
3282 | ftrace_number_of_pages -= 1 << pg->order; |
3283 | } |
3284 | pages = pg->next; |
3285 | kfree(pg); |
3286 | pg = pages; |
3287 | ftrace_number_of_groups--; |
3288 | } |
3289 | } |
3290 | |
3291 | static struct ftrace_page * |
3292 | ftrace_allocate_pages(unsigned long num_to_init) |
3293 | { |
3294 | struct ftrace_page *start_pg; |
3295 | struct ftrace_page *pg; |
3296 | int cnt; |
3297 | |
3298 | if (!num_to_init) |
3299 | return NULL; |
3300 | |
3301 | start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); |
3302 | if (!pg) |
3303 | return NULL; |
3304 | |
3305 | /* |
3306 | * Try to allocate as much as possible in one contiguous |
3307 | * location that fills in all of the space. We want to |
3308 | * waste as little space as possible. |
3309 | */ |
3310 | for (;;) { |
3311 | cnt = ftrace_allocate_records(pg, num_to_init); |
3312 | if (cnt < 0) |
3313 | goto free_pages; |
3314 | |
3315 | num_to_init -= cnt; |
3316 | if (!num_to_init) |
3317 | break; |
3318 | |
3319 | pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); |
3320 | if (!pg->next) |
3321 | goto free_pages; |
3322 | |
3323 | pg = pg->next; |
3324 | } |
3325 | |
3326 | return start_pg; |
3327 | |
3328 | free_pages: |
3329 | ftrace_free_pages(start_pg); |
3330 | pr_info("ftrace: FAILED to allocate memory for functions\n"); |
3331 | return NULL; |
3332 | } |
3333 | |
3334 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ |
3335 | |
3336 | struct ftrace_iterator { |
3337 | loff_t pos; |
3338 | loff_t func_pos; |
3339 | loff_t mod_pos; |
3340 | struct ftrace_page *pg; |
3341 | struct dyn_ftrace *func; |
3342 | struct ftrace_func_probe *probe; |
3343 | struct ftrace_func_entry *probe_entry; |
3344 | struct trace_parser parser; |
3345 | struct ftrace_hash *hash; |
3346 | struct ftrace_ops *ops; |
3347 | struct trace_array *tr; |
3348 | struct list_head *mod_list; |
3349 | int pidx; |
3350 | int idx; |
3351 | unsigned flags; |
3352 | }; |
3353 | |
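| /* |
|  * The t_*() functions below implement the seq_file iterator used by |
|  * set_ftrace_filter and the related tracefs files. Iteration walks the |
|  * plain function records first, then the deferred module commands, then |
|  * the probe entries, driven by the FTRACE_ITER_* flags in |
|  * struct ftrace_iterator. |
|  */ |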
3354 | static void * |
3355 | t_probe_next(struct seq_file *m, loff_t *pos) |
3356 | { |
3357 | struct ftrace_iterator *iter = m->private; |
3358 | struct trace_array *tr = iter->ops->private; |
3359 | struct list_head *func_probes; |
3360 | struct ftrace_hash *hash; |
3361 | struct list_head *next; |
3362 | struct hlist_node *hnd = NULL; |
3363 | struct hlist_head *hhd; |
3364 | int size; |
3365 | |
3366 | (*pos)++; |
3367 | iter->pos = *pos; |
3368 | |
3369 | if (!tr) |
3370 | return NULL; |
3371 | |
3372 | func_probes = &tr->func_probes; |
3373 | if (list_empty(func_probes)) |
3374 | return NULL; |
3375 | |
3376 | if (!iter->probe) { |
3377 | next = func_probes->next; |
3378 | iter->probe = list_entry(next, struct ftrace_func_probe, list); |
3379 | } |
3380 | |
3381 | if (iter->probe_entry) |
3382 | hnd = &iter->probe_entry->hlist; |
3383 | |
3384 | hash = iter->probe->ops.func_hash->filter_hash; |
3385 | |
3386 | /* |
3387 | * A probe being registered may temporarily have an empty hash |
3388 | * and it's at the end of the func_probes list. |
3389 | */ |
3390 | if (!hash || hash == EMPTY_HASH) |
3391 | return NULL; |
3392 | |
3393 | size = 1 << hash->size_bits; |
3394 | |
3395 | retry: |
3396 | if (iter->pidx >= size) { |
3397 | if (iter->probe->list.next == func_probes) |
3398 | return NULL; |
3399 | next = iter->probe->list.next; |
3400 | iter->probe = list_entry(next, struct ftrace_func_probe, list); |
3401 | hash = iter->probe->ops.func_hash->filter_hash; |
3402 | size = 1 << hash->size_bits; |
3403 | iter->pidx = 0; |
3404 | } |
3405 | |
3406 | hhd = &hash->buckets[iter->pidx]; |
3407 | |
3408 | if (hlist_empty(hhd)) { |
3409 | iter->pidx++; |
3410 | hnd = NULL; |
3411 | goto retry; |
3412 | } |
3413 | |
3414 | if (!hnd) |
3415 | hnd = hhd->first; |
3416 | else { |
3417 | hnd = hnd->next; |
3418 | if (!hnd) { |
3419 | iter->pidx++; |
3420 | goto retry; |
3421 | } |
3422 | } |
3423 | |
3424 | if (WARN_ON_ONCE(!hnd)) |
3425 | return NULL; |
3426 | |
3427 | iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist); |
3428 | |
3429 | return iter; |
3430 | } |
3431 | |
3432 | static void *t_probe_start(struct seq_file *m, loff_t *pos) |
3433 | { |
3434 | struct ftrace_iterator *iter = m->private; |
3435 | void *p = NULL; |
3436 | loff_t l; |
3437 | |
3438 | if (!(iter->flags & FTRACE_ITER_DO_PROBES)) |
3439 | return NULL; |
3440 | |
3441 | if (iter->mod_pos > *pos) |
3442 | return NULL; |
3443 | |
3444 | iter->probe = NULL; |
3445 | iter->probe_entry = NULL; |
3446 | iter->pidx = 0; |
3447 | for (l = 0; l <= (*pos - iter->mod_pos); ) { |
3448 | p = t_probe_next(m, &l); |
3449 | if (!p) |
3450 | break; |
3451 | } |
3452 | if (!p) |
3453 | return NULL; |
3454 | |
3455 | /* Only set this if we have an item */ |
3456 | iter->flags |= FTRACE_ITER_PROBE; |
3457 | |
3458 | return iter; |
3459 | } |
3460 | |
3461 | static int |
3462 | t_probe_show(struct seq_file *m, struct ftrace_iterator *iter) |
3463 | { |
3464 | struct ftrace_func_entry *probe_entry; |
3465 | struct ftrace_probe_ops *probe_ops; |
3466 | struct ftrace_func_probe *probe; |
3467 | |
3468 | probe = iter->probe; |
3469 | probe_entry = iter->probe_entry; |
3470 | |
3471 | if (WARN_ON_ONCE(!probe || !probe_entry)) |
3472 | return -EIO; |
3473 | |
3474 | probe_ops = probe->probe_ops; |
3475 | |
3476 | if (probe_ops->print) |
3477 | return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data); |
3478 | |
3479 | seq_printf(m, fmt: "%ps:%ps\n", (void *)probe_entry->ip, |
3480 | (void *)probe_ops->func); |
3481 | |
3482 | return 0; |
3483 | } |
3484 | |
3485 | static void * |
3486 | t_mod_next(struct seq_file *m, loff_t *pos) |
3487 | { |
3488 | struct ftrace_iterator *iter = m->private; |
3489 | struct trace_array *tr = iter->tr; |
3490 | |
3491 | (*pos)++; |
3492 | iter->pos = *pos; |
3493 | |
3494 | iter->mod_list = iter->mod_list->next; |
3495 | |
3496 | if (iter->mod_list == &tr->mod_trace || |
3497 | iter->mod_list == &tr->mod_notrace) { |
3498 | iter->flags &= ~FTRACE_ITER_MOD; |
3499 | return NULL; |
3500 | } |
3501 | |
3502 | iter->mod_pos = *pos; |
3503 | |
3504 | return iter; |
3505 | } |
3506 | |
3507 | static void *t_mod_start(struct seq_file *m, loff_t *pos) |
3508 | { |
3509 | struct ftrace_iterator *iter = m->private; |
3510 | void *p = NULL; |
3511 | loff_t l; |
3512 | |
3513 | if (iter->func_pos > *pos) |
3514 | return NULL; |
3515 | |
3516 | iter->mod_pos = iter->func_pos; |
3517 | |
3518 | /* probes are only available if tr is set */ |
3519 | if (!iter->tr) |
3520 | return NULL; |
3521 | |
3522 | for (l = 0; l <= (*pos - iter->func_pos); ) { |
3523 | p = t_mod_next(m, &l); |
3524 | if (!p) |
3525 | break; |
3526 | } |
3527 | if (!p) { |
3528 | iter->flags &= ~FTRACE_ITER_MOD; |
3529 | return t_probe_start(m, pos); |
3530 | } |
3531 | |
3532 | /* Only set this if we have an item */ |
3533 | iter->flags |= FTRACE_ITER_MOD; |
3534 | |
3535 | return iter; |
3536 | } |
3537 | |
3538 | static int |
3539 | t_mod_show(struct seq_file *m, struct ftrace_iterator *iter) |
3540 | { |
3541 | struct ftrace_mod_load *ftrace_mod; |
3542 | struct trace_array *tr = iter->tr; |
3543 | |
3544 | if (WARN_ON_ONCE(!iter->mod_list) || |
3545 | iter->mod_list == &tr->mod_trace || |
3546 | iter->mod_list == &tr->mod_notrace) |
3547 | return -EIO; |
3548 | |
3549 | ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list); |
3550 | |
3551 | if (ftrace_mod->func) |
3552 | seq_printf(m, fmt: "%s", ftrace_mod->func); |
3553 | else |
3554 | seq_putc(m, c: '*'); |
3555 | |
3556 | seq_printf(m, fmt: ":mod:%s\n", ftrace_mod->module); |
3557 | |
3558 | return 0; |
3559 | } |
3560 | |
3561 | static void * |
3562 | t_func_next(struct seq_file *m, loff_t *pos) |
3563 | { |
3564 | struct ftrace_iterator *iter = m->private; |
3565 | struct dyn_ftrace *rec = NULL; |
3566 | |
3567 | (*pos)++; |
3568 | |
3569 | retry: |
3570 | if (iter->idx >= iter->pg->index) { |
3571 | if (iter->pg->next) { |
3572 | iter->pg = iter->pg->next; |
3573 | iter->idx = 0; |
3574 | goto retry; |
3575 | } |
3576 | } else { |
3577 | rec = &iter->pg->records[iter->idx++]; |
3578 | if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && |
3579 | !ftrace_lookup_ip(iter->hash, rec->ip)) || |
3580 | |
3581 | ((iter->flags & FTRACE_ITER_ENABLED) && |
3582 | !(rec->flags & FTRACE_FL_ENABLED)) || |
3583 | |
3584 | ((iter->flags & FTRACE_ITER_TOUCHED) && |
3585 | !(rec->flags & FTRACE_FL_TOUCHED))) { |
3586 | |
3587 | rec = NULL; |
3588 | goto retry; |
3589 | } |
3590 | } |
3591 | |
3592 | if (!rec) |
3593 | return NULL; |
3594 | |
3595 | iter->pos = iter->func_pos = *pos; |
3596 | iter->func = rec; |
3597 | |
3598 | return iter; |
3599 | } |
3600 | |
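| /* |
|  * The t_*() seq_file iterators below walk entries in three stages: plain |
|  * function records first (t_func_next), then any cached ":mod:" filters |
|  * (t_mod_next), and finally the attached probes (t_probe_next).  When one |
|  * stage is exhausted, t_next() falls through to the start routine of the |
|  * next stage. |
|  */ |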
3601 | static void * |
3602 | t_next(struct seq_file *m, void *v, loff_t *pos) |
3603 | { |
3604 | struct ftrace_iterator *iter = m->private; |
3605 | loff_t l = *pos; /* t_probe_start() must use original pos */ |
3606 | void *ret; |
3607 | |
3608 | if (unlikely(ftrace_disabled)) |
3609 | return NULL; |
3610 | |
3611 | if (iter->flags & FTRACE_ITER_PROBE) |
3612 | return t_probe_next(m, pos); |
3613 | |
3614 | if (iter->flags & FTRACE_ITER_MOD) |
3615 | return t_mod_next(m, pos); |
3616 | |
3617 | if (iter->flags & FTRACE_ITER_PRINTALL) { |
3618 | /* next must increment pos, and t_probe_start does not */ |
3619 | (*pos)++; |
3620 | return t_mod_start(m, &l); |
3621 | } |
3622 | |
3623 | ret = t_func_next(m, pos); |
3624 | |
3625 | if (!ret) |
3626 | return t_mod_start(m, &l); |
3627 | |
3628 | return ret; |
3629 | } |
3630 | |
3631 | static void reset_iter_read(struct ftrace_iterator *iter) |
3632 | { |
3633 | iter->pos = 0; |
3634 | iter->func_pos = 0; |
3635 | iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD); |
3636 | } |
3637 | |
3638 | static void *t_start(struct seq_file *m, loff_t *pos) |
3639 | { |
3640 | struct ftrace_iterator *iter = m->private; |
3641 | void *p = NULL; |
3642 | loff_t l; |
3643 | |
3644 | mutex_lock(&ftrace_lock); |
3645 | |
3646 | if (unlikely(ftrace_disabled)) |
3647 | return NULL; |
3648 | |
3649 | /* |
3650 | * If an lseek was done, then reset and start from beginning. |
3651 | */ |
3652 | if (*pos < iter->pos) |
3653 | reset_iter_read(iter); |
3654 | |
3655 | /* |
3656 | * For set_ftrace_filter reading, if we have the filter |
3657 | * off, we can short cut and just print out that all |
3658 | * functions are enabled. |
3659 | */ |
3660 | if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && |
3661 | ftrace_hash_empty(iter->hash)) { |
3662 | iter->func_pos = 1; /* Account for the message */ |
3663 | if (*pos > 0) |
3664 | return t_mod_start(m, pos); |
3665 | iter->flags |= FTRACE_ITER_PRINTALL; |
3666 | /* reset in case of seek/pread */ |
3667 | iter->flags &= ~FTRACE_ITER_PROBE; |
3668 | return iter; |
3669 | } |
3670 | |
3671 | if (iter->flags & FTRACE_ITER_MOD) |
3672 | return t_mod_start(m, pos); |
3673 | |
3674 | /* |
3675 | * Unfortunately, we need to restart at ftrace_pages_start |
3676 | * every time we let go of the ftrace_mutex. This is because |
3677 | * those pointers can change without the lock. |
3678 | */ |
3679 | iter->pg = ftrace_pages_start; |
3680 | iter->idx = 0; |
3681 | for (l = 0; l <= *pos; ) { |
3682 | p = t_func_next(m, &l); |
3683 | if (!p) |
3684 | break; |
3685 | } |
3686 | |
3687 | if (!p) |
3688 | return t_mod_start(m, pos); |
3689 | |
3690 | return iter; |
3691 | } |
3692 | |
3693 | static void t_stop(struct seq_file *m, void *p) |
3694 | { |
3695 | mutex_unlock(&ftrace_lock); |
3696 | } |
3697 | |
3698 | void * __weak |
3699 | arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) |
3700 | { |
3701 | return NULL; |
3702 | } |
3703 | |
3704 | static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops, |
3705 | struct dyn_ftrace *rec) |
3706 | { |
3707 | void *ptr; |
3708 | |
3709 | ptr = arch_ftrace_trampoline_func(ops, rec); |
3710 | if (ptr) |
3711 | seq_printf(m, " ->%pS", ptr); |
3712 | } |
3713 | |
3714 | #ifdef FTRACE_MCOUNT_MAX_OFFSET |
3715 | /* |
3716 | * Weak functions can still have an mcount/fentry that is saved in |
3717 | * the __mcount_loc section. These can be detected by having a |
3718 | * symbol offset greater than FTRACE_MCOUNT_MAX_OFFSET, as the |
3719 | * symbol found by kallsyms is not the function that the mcount/fentry |
3720 | * is part of. The offset is much greater in these cases. |
3721 | * |
3722 | * Test the record to make sure that the ip points to a valid kallsyms |
3723 | * and if not, mark it disabled. |
3724 | */ |
3725 | static int test_for_valid_rec(struct dyn_ftrace *rec) |
3726 | { |
3727 | char str[KSYM_SYMBOL_LEN]; |
3728 | unsigned long offset; |
3729 | const char *ret; |
3730 | |
3731 | ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str); |
3732 | |
3733 | /* Weak functions can cause invalid addresses */ |
3734 | if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) { |
3735 | rec->flags |= FTRACE_FL_DISABLED; |
3736 | return 0; |
3737 | } |
3738 | return 1; |
3739 | } |
3740 | |
3741 | static struct workqueue_struct *ftrace_check_wq __initdata; |
3742 | static struct work_struct ftrace_check_work __initdata; |
3743 | |
3744 | /* |
3745 | * Scan all the mcount/fentry entries to make sure they are valid. |
3746 | */ |
3747 | static __init void ftrace_check_work_func(struct work_struct *work) |
3748 | { |
3749 | struct ftrace_page *pg; |
3750 | struct dyn_ftrace *rec; |
3751 | |
3752 | mutex_lock(&ftrace_lock); |
3753 | do_for_each_ftrace_rec(pg, rec) { |
3754 | test_for_valid_rec(rec); |
3755 | } while_for_each_ftrace_rec(); |
3756 | mutex_unlock(&ftrace_lock); |
3757 | } |
3758 | |
3759 | static int __init ftrace_check_for_weak_functions(void) |
3760 | { |
3761 | INIT_WORK(&ftrace_check_work, ftrace_check_work_func); |
3762 | |
3763 | ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0); |
3764 | |
3765 | queue_work(ftrace_check_wq, &ftrace_check_work); |
3766 | return 0; |
3767 | } |
3768 | |
3769 | static int __init ftrace_check_sync(void) |
3770 | { |
3771 | /* Make sure the ftrace_check updates are finished */ |
3772 | if (ftrace_check_wq) |
3773 | destroy_workqueue(ftrace_check_wq); |
3774 | return 0; |
3775 | } |
3776 | |
3777 | late_initcall_sync(ftrace_check_sync); |
3778 | subsys_initcall(ftrace_check_for_weak_functions); |
3779 | |
3780 | static int print_rec(struct seq_file *m, unsigned long ip) |
3781 | { |
3782 | unsigned long offset; |
3783 | char str[KSYM_SYMBOL_LEN]; |
3784 | char *modname; |
3785 | const char *ret; |
3786 | |
3787 | ret = kallsyms_lookup(ip, NULL, &offset, &modname, str); |
3788 | /* Weak functions can cause invalid addresses */ |
3789 | if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) { |
3790 | snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld", |
3791 | FTRACE_INVALID_FUNCTION, offset); |
3792 | ret = NULL; |
3793 | } |
3794 | |
3795 | seq_puts(m, str); |
3796 | if (modname) |
3797 | seq_printf(m, " [%s]", modname); |
3798 | return ret == NULL ? -1 : 0; |
3799 | } |
3800 | #else |
3801 | static inline int test_for_valid_rec(struct dyn_ftrace *rec) |
3802 | { |
3803 | return 1; |
3804 | } |
3805 | |
3806 | static inline int print_rec(struct seq_file *m, unsigned long ip) |
3807 | { |
3808 | seq_printf(m, "%ps", (void *)ip); |
3809 | return 0; |
3810 | } |
3811 | #endif |
3812 | |
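| /* |
|  * t_show() emits one line per record.  For the enabled_functions and |
|  * touched_functions files it appends the ops reference count and the flag |
|  * letters R (saves regs), I (ipmodify), D (direct), O (call ops) and |
|  * M (modified), followed by any trampoline, ops and direct-call details. |
|  */ |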
3813 | static int t_show(struct seq_file *m, void *v) |
3814 | { |
3815 | struct ftrace_iterator *iter = m->private; |
3816 | struct dyn_ftrace *rec; |
3817 | |
3818 | if (iter->flags & FTRACE_ITER_PROBE) |
3819 | return t_probe_show(m, iter); |
3820 | |
3821 | if (iter->flags & FTRACE_ITER_MOD) |
3822 | return t_mod_show(m, iter); |
3823 | |
3824 | if (iter->flags & FTRACE_ITER_PRINTALL) { |
3825 | if (iter->flags & FTRACE_ITER_NOTRACE) |
3826 | seq_puts(m, "#### no functions disabled ####\n"); |
3827 | else |
3828 | seq_puts(m, "#### all functions enabled ####\n"); |
3829 | return 0; |
3830 | } |
3831 | |
3832 | rec = iter->func; |
3833 | |
3834 | if (!rec) |
3835 | return 0; |
3836 | |
3837 | if (iter->flags & FTRACE_ITER_ADDRS) |
3838 | seq_printf(m, "%lx ", rec->ip); |
3839 | |
3840 | if (print_rec(m, rec->ip)) { |
3841 | /* This should only happen when a rec is disabled */ |
3842 | WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED)); |
3843 | seq_putc(m, '\n'); |
3844 | return 0; |
3845 | } |
3846 | |
3847 | if (iter->flags & (FTRACE_ITER_ENABLED | FTRACE_ITER_TOUCHED)) { |
3848 | struct ftrace_ops *ops; |
3849 | |
3850 | seq_printf(m, " (%ld)%s%s%s%s%s", |
3851 | ftrace_rec_count(rec), |
3852 | rec->flags & FTRACE_FL_REGS ? " R": " ", |
3853 | rec->flags & FTRACE_FL_IPMODIFY ? " I": " ", |
3854 | rec->flags & FTRACE_FL_DIRECT ? " D": " ", |
3855 | rec->flags & FTRACE_FL_CALL_OPS ? " O": " ", |
3856 | rec->flags & FTRACE_FL_MODIFIED ? " M ": " "); |
3857 | if (rec->flags & FTRACE_FL_TRAMP_EN) { |
3858 | ops = ftrace_find_tramp_ops_any(rec); |
3859 | if (ops) { |
3860 | do { |
3861 | seq_printf(m, "\ttramp: %pS (%pS)", |
3862 | (void *)ops->trampoline, |
3863 | (void *)ops->func); |
3864 | add_trampoline_func(m, ops, rec); |
3865 | ops = ftrace_find_tramp_ops_next(rec, ops); |
3866 | } while (ops); |
3867 | } else |
3868 | seq_puts(m, "\ttramp: ERROR!"); |
3869 | } else { |
3870 | add_trampoline_func(m, NULL, rec); |
3871 | } |
3872 | if (rec->flags & FTRACE_FL_CALL_OPS_EN) { |
3873 | ops = ftrace_find_unique_ops(rec); |
3874 | if (ops) { |
3875 | seq_printf(m, "\tops: %pS (%pS)", |
3876 | ops, ops->func); |
3877 | } else { |
3878 | seq_puts(m, "\tops: ERROR!"); |
3879 | } |
3880 | } |
3881 | if (rec->flags & FTRACE_FL_DIRECT) { |
3882 | unsigned long direct; |
3883 | |
3884 | direct = ftrace_find_rec_direct(rec->ip); |
3885 | if (direct) |
3886 | seq_printf(m, "\n\tdirect-->%pS", (void *)direct); |
3887 | } |
3888 | } |
3889 | |
3890 | seq_putc(m, '\n'); |
3891 | |
3892 | return 0; |
3893 | } |
3894 | |
3895 | static const struct seq_operations show_ftrace_seq_ops = { |
3896 | .start = t_start, |
3897 | .next = t_next, |
3898 | .stop = t_stop, |
3899 | .show = t_show, |
3900 | }; |
3901 | |
3902 | static int |
3903 | ftrace_avail_open(struct inode *inode, struct file *file) |
3904 | { |
3905 | struct ftrace_iterator *iter; |
3906 | int ret; |
3907 | |
3908 | ret = security_locked_down(LOCKDOWN_TRACEFS); |
3909 | if (ret) |
3910 | return ret; |
3911 | |
3912 | if (unlikely(ftrace_disabled)) |
3913 | return -ENODEV; |
3914 | |
3915 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
3916 | if (!iter) |
3917 | return -ENOMEM; |
3918 | |
3919 | iter->pg = ftrace_pages_start; |
3920 | iter->ops = &global_ops; |
3921 | |
3922 | return 0; |
3923 | } |
3924 | |
3925 | static int |
3926 | ftrace_enabled_open(struct inode *inode, struct file *file) |
3927 | { |
3928 | struct ftrace_iterator *iter; |
3929 | |
3930 | /* |
3931 | * This shows us what functions are currently being |
3932 | * traced and by what. Not sure if we want lockdown |
3933 | * to hide such critical information for an admin. |
3934 | * Although, perhaps it can show information we don't |
3935 | * want people to see, but if something is tracing |
3936 | * something, we probably want to know about it. |
3937 | */ |
3938 | |
3939 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
3940 | if (!iter) |
3941 | return -ENOMEM; |
3942 | |
3943 | iter->pg = ftrace_pages_start; |
3944 | iter->flags = FTRACE_ITER_ENABLED; |
3945 | iter->ops = &global_ops; |
3946 | |
3947 | return 0; |
3948 | } |
3949 | |
3950 | static int |
3951 | ftrace_touched_open(struct inode *inode, struct file *file) |
3952 | { |
3953 | struct ftrace_iterator *iter; |
3954 | |
3955 | /* |
3956 | * This shows us what functions have ever been enabled |
3957 | * (traced, direct, patched, etc). Not sure if we want lockdown |
3958 | * to hide such critical information for an admin. |
3959 | * Although, perhaps it can show information we don't |
3960 | * want people to see, but if something had traced |
3961 | * something, we probably want to know about it. |
3962 | */ |
3963 | |
3964 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
3965 | if (!iter) |
3966 | return -ENOMEM; |
3967 | |
3968 | iter->pg = ftrace_pages_start; |
3969 | iter->flags = FTRACE_ITER_TOUCHED; |
3970 | iter->ops = &global_ops; |
3971 | |
3972 | return 0; |
3973 | } |
3974 | |
3975 | static int |
3976 | ftrace_avail_addrs_open(struct inode *inode, struct file *file) |
3977 | { |
3978 | struct ftrace_iterator *iter; |
3979 | int ret; |
3980 | |
3981 | ret = security_locked_down(LOCKDOWN_TRACEFS); |
3982 | if (ret) |
3983 | return ret; |
3984 | |
3985 | if (unlikely(ftrace_disabled)) |
3986 | return -ENODEV; |
3987 | |
3988 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
3989 | if (!iter) |
3990 | return -ENOMEM; |
3991 | |
3992 | iter->pg = ftrace_pages_start; |
3993 | iter->flags = FTRACE_ITER_ADDRS; |
3994 | iter->ops = &global_ops; |
3995 | |
3996 | return 0; |
3997 | } |
3998 | |
3999 | /** |
4000 | * ftrace_regex_open - initialize function tracer filter files |
4001 | * @ops: The ftrace_ops that hold the hash filters |
4002 | * @flag: The type of filter to process |
4003 | * @inode: The inode, usually passed in to your open routine |
4004 | * @file: The file, usually passed in to your open routine |
4005 | * |
4006 | * ftrace_regex_open() initializes the filter files for the |
4007 | * @ops. Depending on @flag it may process the filter hash or |
4008 | * the notrace hash of @ops. With this called from the open |
4009 | * routine, you can use ftrace_filter_write() for the write |
4010 | * routine if @flag has FTRACE_ITER_FILTER set, or |
4011 | * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. |
4012 | * tracing_lseek() should be used as the lseek routine, and |
4013 | * release must call ftrace_regex_release(). |
4014 | * |
4015 | * Returns: 0 on success or a negative errno value on failure |
4016 | */ |
4017 | int |
4018 | ftrace_regex_open(struct ftrace_ops *ops, int flag, |
4019 | struct inode *inode, struct file *file) |
4020 | { |
4021 | struct ftrace_iterator *iter; |
4022 | struct ftrace_hash *hash; |
4023 | struct list_head *mod_head; |
4024 | struct trace_array *tr = ops->private; |
4025 | int ret = -ENOMEM; |
4026 | |
4027 | ftrace_ops_init(ops); |
4028 | |
4029 | if (unlikely(ftrace_disabled)) |
4030 | return -ENODEV; |
4031 | |
4032 | if (tracing_check_open_get_tr(tr)) |
4033 | return -ENODEV; |
4034 | |
4035 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
4036 | if (!iter) |
4037 | goto out; |
4038 | |
4039 | if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) |
4040 | goto out; |
4041 | |
4042 | iter->ops = ops; |
4043 | iter->flags = flag; |
4044 | iter->tr = tr; |
4045 | |
4046 | mutex_lock(&ops->func_hash->regex_lock); |
4047 | |
4048 | if (flag & FTRACE_ITER_NOTRACE) { |
4049 | hash = ops->func_hash->notrace_hash; |
4050 | mod_head = tr ? &tr->mod_notrace : NULL; |
4051 | } else { |
4052 | hash = ops->func_hash->filter_hash; |
4053 | mod_head = tr ? &tr->mod_trace : NULL; |
4054 | } |
4055 | |
4056 | iter->mod_list = mod_head; |
4057 | |
4058 | if (file->f_mode & FMODE_WRITE) { |
4059 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; |
4060 | |
4061 | if (file->f_flags & O_TRUNC) { |
4062 | iter->hash = alloc_ftrace_hash(size_bits); |
4063 | clear_ftrace_mod_list(mod_head); |
4064 | } else { |
4065 | iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); |
4066 | } |
4067 | |
4068 | if (!iter->hash) { |
4069 | trace_parser_put(&iter->parser); |
4070 | goto out_unlock; |
4071 | } |
4072 | } else |
4073 | iter->hash = hash; |
4074 | |
4075 | ret = 0; |
4076 | |
4077 | if (file->f_mode & FMODE_READ) { |
4078 | iter->pg = ftrace_pages_start; |
4079 | |
4080 | ret = seq_open(file, &show_ftrace_seq_ops); |
4081 | if (!ret) { |
4082 | struct seq_file *m = file->private_data; |
4083 | m->private = iter; |
4084 | } else { |
4085 | /* Failed */ |
4086 | free_ftrace_hash(iter->hash); |
4087 | trace_parser_put(&iter->parser); |
4088 | } |
4089 | } else |
4090 | file->private_data = iter; |
4091 | |
4092 | out_unlock: |
4093 | mutex_unlock(&ops->func_hash->regex_lock); |
4094 | |
4095 | out: |
4096 | if (ret) { |
4097 | kfree(iter); |
4098 | if (tr) |
4099 | trace_array_put(tr); |
4100 | } |
4101 | |
4102 | return ret; |
4103 | } |
4104 | |
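| /* |
|  * Illustrative sketch only (not part of this file): per the kerneldoc |
|  * above, a filter file built on ftrace_regex_open() is typically wired |
|  * up roughly as follows. |
|  * |
|  *	static const struct file_operations my_filter_fops = { |
|  *		.open    = ftrace_filter_open, |
|  *		.read    = seq_read, |
|  *		.write   = ftrace_filter_write, |
|  *		.llseek  = tracing_lseek, |
|  *		.release = ftrace_regex_release, |
|  *	}; |
|  */ |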
4105 | static int |
4106 | ftrace_filter_open(struct inode *inode, struct file *file) |
4107 | { |
4108 | struct ftrace_ops *ops = inode->i_private; |
4109 | |
4110 | /* Checks for tracefs lockdown */ |
4111 | return ftrace_regex_open(ops, |
4112 | FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES, |
4113 | inode, file); |
4114 | } |
4115 | |
4116 | static int |
4117 | ftrace_notrace_open(struct inode *inode, struct file *file) |
4118 | { |
4119 | struct ftrace_ops *ops = inode->i_private; |
4120 | |
4121 | /* Checks for tracefs lockdown */ |
4122 | return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, |
4123 | inode, file); |
4124 | } |
4125 | |
4126 | /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */ |
4127 | struct ftrace_glob { |
4128 | char *search; |
4129 | unsigned len; |
4130 | int type; |
4131 | }; |
4132 | |
4133 | /* |
4134 | * If symbols in an architecture don't correspond exactly to the user-visible |
4135 | * name of what they represent, it is possible to define this function to |
4136 | * perform the necessary adjustments. |
4137 | */ |
4138 | char * __weak arch_ftrace_match_adjust(char *str, const char *search) |
4139 | { |
4140 | return str; |
4141 | } |
4142 | |
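| /* |
|  * Match types come from filter_parse_regex(): "func" gives MATCH_FULL, |
|  * "func*" MATCH_FRONT_ONLY, "*func" MATCH_END_ONLY, "*func*" |
|  * MATCH_MIDDLE_ONLY, a leading digit MATCH_INDEX, and more complex |
|  * patterns (e.g. "f?o*bar") fall back to MATCH_GLOB via glob_match(). |
|  */ |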
4143 | static int ftrace_match(char *str, struct ftrace_glob *g) |
4144 | { |
4145 | int matched = 0; |
4146 | int slen; |
4147 | |
4148 | str = arch_ftrace_match_adjust(str, g->search); |
4149 | |
4150 | switch (g->type) { |
4151 | case MATCH_FULL: |
4152 | if (strcmp(str, g->search) == 0) |
4153 | matched = 1; |
4154 | break; |
4155 | case MATCH_FRONT_ONLY: |
4156 | if (strncmp(str, g->search, g->len) == 0) |
4157 | matched = 1; |
4158 | break; |
4159 | case MATCH_MIDDLE_ONLY: |
4160 | if (strstr(str, g->search)) |
4161 | matched = 1; |
4162 | break; |
4163 | case MATCH_END_ONLY: |
4164 | slen = strlen(str); |
4165 | if (slen >= g->len && |
4166 | memcmp(str + slen - g->len, g->search, g->len) == 0) |
4167 | matched = 1; |
4168 | break; |
4169 | case MATCH_GLOB: |
4170 | if (glob_match(g->search, str)) |
4171 | matched = 1; |
4172 | break; |
4173 | } |
4174 | |
4175 | return matched; |
4176 | } |
4177 | |
4178 | static int |
4179 | enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter) |
4180 | { |
4181 | struct ftrace_func_entry *entry; |
4182 | int ret = 0; |
4183 | |
4184 | entry = ftrace_lookup_ip(hash, rec->ip); |
4185 | if (clear_filter) { |
4186 | /* Do nothing if it doesn't exist */ |
4187 | if (!entry) |
4188 | return 0; |
4189 | |
4190 | free_hash_entry(hash, entry); |
4191 | } else { |
4192 | /* Do nothing if it exists */ |
4193 | if (entry) |
4194 | return 0; |
4195 | if (add_hash_entry(hash, rec->ip) == NULL) |
4196 | ret = -ENOMEM; |
4197 | } |
4198 | return ret; |
4199 | } |
4200 | |
4201 | static int |
4202 | add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g, |
4203 | int clear_filter) |
4204 | { |
4205 | long index = simple_strtoul(func_g->search, NULL, 0); |
4206 | struct ftrace_page *pg; |
4207 | struct dyn_ftrace *rec; |
4208 | |
4209 | /* The index starts at 1 */ |
4210 | if (--index < 0) |
4211 | return 0; |
4212 | |
4213 | do_for_each_ftrace_rec(pg, rec) { |
4214 | if (pg->index <= index) { |
4215 | index -= pg->index; |
4216 | /* this is a double loop, break goes to the next page */ |
4217 | break; |
4218 | } |
4219 | rec = &pg->records[index]; |
4220 | enter_record(hash, rec, clear_filter); |
4221 | return 1; |
4222 | } while_for_each_ftrace_rec(); |
4223 | return 0; |
4224 | } |
4225 | |
4226 | #ifdef FTRACE_MCOUNT_MAX_OFFSET |
4227 | static int lookup_ip(unsigned long ip, char **modname, char *str) |
4228 | { |
4229 | unsigned long offset; |
4230 | |
4231 | kallsyms_lookup(ip, NULL, &offset, modname, str); |
4232 | if (offset > FTRACE_MCOUNT_MAX_OFFSET) |
4233 | return -1; |
4234 | return 0; |
4235 | } |
4236 | #else |
4237 | static int lookup_ip(unsigned long ip, char **modname, char *str) |
4238 | { |
4239 | kallsyms_lookup(ip, NULL, NULL, modname, str); |
4240 | return 0; |
4241 | } |
4242 | #endif |
4243 | |
4244 | static int |
4245 | ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g, |
4246 | struct ftrace_glob *mod_g, int exclude_mod) |
4247 | { |
4248 | char str[KSYM_SYMBOL_LEN]; |
4249 | char *modname; |
4250 | |
4251 | if (lookup_ip(rec->ip, &modname, str)) { |
4252 | /* This should only happen when a rec is disabled */ |
4253 | WARN_ON_ONCE(system_state == SYSTEM_RUNNING && |
4254 | !(rec->flags & FTRACE_FL_DISABLED)); |
4255 | return 0; |
4256 | } |
4257 | |
4258 | if (mod_g) { |
4259 | int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0; |
4260 | |
4261 | /* blank module name to match all modules */ |
4262 | if (!mod_g->len) { |
4263 | /* blank module globbing: modname xor exclude_mod */ |
4264 | if (!exclude_mod != !modname) |
4265 | goto func_match; |
4266 | return 0; |
4267 | } |
4268 | |
4269 | /* |
4270 | * exclude_mod is set to trace everything but the given |
4271 | * module. If it is set and the module matches, then |
4272 | * return 0. If it is not set, and the module doesn't match |
4273 | * also return 0. Otherwise, check the function to see if |
4274 | * that matches. |
4275 | */ |
4276 | if (!mod_matches == !exclude_mod) |
4277 | return 0; |
4278 | func_match: |
4279 | /* blank search means to match all funcs in the mod */ |
4280 | if (!func_g->len) |
4281 | return 1; |
4282 | } |
4283 | |
4284 | return ftrace_match(str, func_g); |
4285 | } |
4286 | |
4287 | static int |
4288 | match_records(struct ftrace_hash *hash, char *func, int len, char *mod) |
4289 | { |
4290 | struct ftrace_page *pg; |
4291 | struct dyn_ftrace *rec; |
4292 | struct ftrace_glob func_g = { .type = MATCH_FULL }; |
4293 | struct ftrace_glob mod_g = { .type = MATCH_FULL }; |
4294 | struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL; |
4295 | int exclude_mod = 0; |
4296 | int found = 0; |
4297 | int ret; |
4298 | int clear_filter = 0; |
4299 | |
4300 | if (func) { |
4301 | func_g.type = filter_parse_regex(func, len, &func_g.search, |
4302 | &clear_filter); |
4303 | func_g.len = strlen(func_g.search); |
4304 | } |
4305 | |
4306 | if (mod) { |
4307 | mod_g.type = filter_parse_regex(mod, strlen(mod), |
4308 | &mod_g.search, &exclude_mod); |
4309 | mod_g.len = strlen(mod_g.search); |
4310 | } |
4311 | |
4312 | mutex_lock(&ftrace_lock); |
4313 | |
4314 | if (unlikely(ftrace_disabled)) |
4315 | goto out_unlock; |
4316 | |
4317 | if (func_g.type == MATCH_INDEX) { |
4318 | found = add_rec_by_index(hash, &func_g, clear_filter); |
4319 | goto out_unlock; |
4320 | } |
4321 | |
4322 | do_for_each_ftrace_rec(pg, rec) { |
4323 | |
4324 | if (rec->flags & FTRACE_FL_DISABLED) |
4325 | continue; |
4326 | |
4327 | if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { |
4328 | ret = enter_record(hash, rec, clear_filter); |
4329 | if (ret < 0) { |
4330 | found = ret; |
4331 | goto out_unlock; |
4332 | } |
4333 | found = 1; |
4334 | } |
4335 | cond_resched(); |
4336 | } while_for_each_ftrace_rec(); |
4337 | out_unlock: |
4338 | mutex_unlock(&ftrace_lock); |
4339 | |
4340 | return found; |
4341 | } |
4342 | |
4343 | static int |
4344 | ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) |
4345 | { |
4346 | return match_records(hash, buff, len, NULL); |
4347 | } |
4348 | |
4349 | static void ftrace_ops_update_code(struct ftrace_ops *ops, |
4350 | struct ftrace_ops_hash *old_hash) |
4351 | { |
4352 | struct ftrace_ops *op; |
4353 | |
4354 | if (!ftrace_enabled) |
4355 | return; |
4356 | |
4357 | if (ops->flags & FTRACE_OPS_FL_ENABLED) { |
4358 | ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); |
4359 | return; |
4360 | } |
4361 | |
4362 | /* |
4363 | * If this is the shared global_ops filter, then we need to |
4364 | * check if another ops that shares it is enabled. |
4365 | * If so, we still need to run the modify code. |
4366 | */ |
4367 | if (ops->func_hash != &global_ops.local_hash) |
4368 | return; |
4369 | |
4370 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
4371 | if (op->func_hash == &global_ops.local_hash && |
4372 | op->flags & FTRACE_OPS_FL_ENABLED) { |
4373 | ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash); |
4374 | /* Only need to do this once */ |
4375 | return; |
4376 | } |
4377 | } while_for_each_ftrace_op(op); |
4378 | } |
4379 | |
4380 | static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops, |
4381 | struct ftrace_hash **orig_hash, |
4382 | struct ftrace_hash *hash, |
4383 | int enable) |
4384 | { |
4385 | struct ftrace_ops_hash old_hash_ops; |
4386 | struct ftrace_hash *old_hash; |
4387 | int ret; |
4388 | |
4389 | old_hash = *orig_hash; |
4390 | old_hash_ops.filter_hash = ops->func_hash->filter_hash; |
4391 | old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; |
4392 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); |
4393 | if (!ret) { |
4394 | ftrace_ops_update_code(ops, &old_hash_ops); |
4395 | free_ftrace_hash_rcu(old_hash); |
4396 | } |
4397 | return ret; |
4398 | } |
4399 | |
4400 | static bool module_exists(const char *module) |
4401 | { |
4402 | /* All modules have the symbol __this_module */ |
4403 | static const char this_mod[] = "__this_module"; |
4404 | char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2]; |
4405 | unsigned long val; |
4406 | int n; |
4407 | |
4408 | n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod); |
4409 | |
4410 | if (n > sizeof(modname) - 1) |
4411 | return false; |
4412 | |
4413 | val = module_kallsyms_lookup_name(modname); |
4414 | return val != 0; |
4415 | } |
4416 | |
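| /* |
|  * Cache a "func:mod:module" filter for a module that is not loaded yet. |
|  * The request is saved on tr->mod_trace or tr->mod_notrace and replayed |
|  * by process_mod_list() once the module is loaded. |
|  */ |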
4417 | static int cache_mod(struct trace_array *tr, |
4418 | const char *func, char *module, int enable) |
4419 | { |
4420 | struct ftrace_mod_load *ftrace_mod, *n; |
4421 | struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace; |
4422 | int ret; |
4423 | |
4424 | mutex_lock(&ftrace_lock); |
4425 | |
4426 | /* We do not cache inverse filters */ |
4427 | if (func[0] == '!') { |
4428 | func++; |
4429 | ret = -EINVAL; |
4430 | |
4431 | /* Look to remove this hash */ |
4432 | list_for_each_entry_safe(ftrace_mod, n, head, list) { |
4433 | if (strcmp(ftrace_mod->module, module) != 0) |
4434 | continue; |
4435 | |
4436 | /* no func matches all */ |
4437 | if (strcmp(func, "*") == 0 || |
4438 | (ftrace_mod->func && |
4439 | strcmp(ftrace_mod->func, func) == 0)) { |
4440 | ret = 0; |
4441 | free_ftrace_mod(ftrace_mod); |
4442 | continue; |
4443 | } |
4444 | } |
4445 | goto out; |
4446 | } |
4447 | |
4448 | ret = -EINVAL; |
4449 | /* We only care about modules that have not been loaded yet */ |
4450 | if (module_exists(module)) |
4451 | goto out; |
4452 | |
4453 | /* Save this string off, and execute it when the module is loaded */ |
4454 | ret = ftrace_add_mod(tr, func, module, enable); |
4455 | out: |
4456 | mutex_unlock(&ftrace_lock); |
4457 | |
4458 | return ret; |
4459 | } |
4460 | |
4461 | static int |
4462 | ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, |
4463 | int reset, int enable); |
4464 | |
4465 | #ifdef CONFIG_MODULES |
4466 | static void process_mod_list(struct list_head *head, struct ftrace_ops *ops, |
4467 | char *mod, bool enable) |
4468 | { |
4469 | struct ftrace_mod_load *ftrace_mod, *n; |
4470 | struct ftrace_hash **orig_hash, *new_hash; |
4471 | LIST_HEAD(process_mods); |
4472 | char *func; |
4473 | |
4474 | mutex_lock(&ops->func_hash->regex_lock); |
4475 | |
4476 | if (enable) |
4477 | orig_hash = &ops->func_hash->filter_hash; |
4478 | else |
4479 | orig_hash = &ops->func_hash->notrace_hash; |
4480 | |
4481 | new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, |
4482 | *orig_hash); |
4483 | if (!new_hash) |
4484 | goto out; /* warn? */ |
4485 | |
4486 | mutex_lock(&ftrace_lock); |
4487 | |
4488 | list_for_each_entry_safe(ftrace_mod, n, head, list) { |
4489 | |
4490 | if (strcmp(ftrace_mod->module, mod) != 0) |
4491 | continue; |
4492 | |
4493 | if (ftrace_mod->func) |
4494 | func = kstrdup(ftrace_mod->func, GFP_KERNEL); |
4495 | else |
4496 | func = kstrdup("*", GFP_KERNEL); |
4497 | |
4498 | if (!func) /* warn? */ |
4499 | continue; |
4500 | |
4501 | list_move(&ftrace_mod->list, &process_mods); |
4502 | |
4503 | /* Use the newly allocated func, as it may be "*" */ |
4504 | kfree(ftrace_mod->func); |
4505 | ftrace_mod->func = func; |
4506 | } |
4507 | |
4508 | mutex_unlock(&ftrace_lock); |
4509 | |
4510 | list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) { |
4511 | |
4512 | func = ftrace_mod->func; |
4513 | |
4514 | /* Grabs ftrace_lock, which is why we have this extra step */ |
4515 | match_records(new_hash, func, strlen(func), mod); |
4516 | free_ftrace_mod(ftrace_mod); |
4517 | } |
4518 | |
4519 | if (enable && list_empty(head)) |
4520 | new_hash->flags &= ~FTRACE_HASH_FL_MOD; |
4521 | |
4522 | mutex_lock(&ftrace_lock); |
4523 | |
4524 | ftrace_hash_move_and_update_ops(ops, orig_hash, |
4525 | new_hash, enable); |
4526 | mutex_unlock(&ftrace_lock); |
4527 | |
4528 | out: |
4529 | mutex_unlock(&ops->func_hash->regex_lock); |
4530 | |
4531 | free_ftrace_hash(new_hash); |
4532 | } |
4533 | |
4534 | static void process_cached_mods(const char *mod_name) |
4535 | { |
4536 | struct trace_array *tr; |
4537 | char *mod; |
4538 | |
4539 | mod = kstrdup(mod_name, GFP_KERNEL); |
4540 | if (!mod) |
4541 | return; |
4542 | |
4543 | mutex_lock(&trace_types_lock); |
4544 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
4545 | if (!list_empty(&tr->mod_trace)) |
4546 | process_mod_list(&tr->mod_trace, tr->ops, mod, true); |
4547 | if (!list_empty(&tr->mod_notrace)) |
4548 | process_mod_list(&tr->mod_notrace, tr->ops, mod, false); |
4549 | } |
4550 | mutex_unlock(&trace_types_lock); |
4551 | |
4552 | kfree(mod); |
4553 | } |
4554 | #endif |
4555 | |
4556 | /* |
4557 | * We register the module command as a template to show others how |
4558 | * to register a command as well. |
4559 | */ |
4560 | |
4561 | static int |
4562 | ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash, |
4563 | char *func_orig, char *cmd, char *module, int enable) |
4564 | { |
4565 | char *func; |
4566 | int ret; |
4567 | |
4568 | /* match_records() modifies func, and we need the original */ |
4569 | func = kstrdup(func_orig, GFP_KERNEL); |
4570 | if (!func) |
4571 | return -ENOMEM; |
4572 | |
4573 | /* |
4574 | * cmd == 'mod' because we only registered this func |
4575 | * for the 'mod' ftrace_func_command. |
4576 | * But if you register one func with multiple commands, |
4577 | * you can tell which command was used by the cmd |
4578 | * parameter. |
4579 | */ |
4580 | ret = match_records(hash, func, strlen(func), module); |
4581 | kfree(func); |
4582 | |
4583 | if (!ret) |
4584 | return cache_mod(tr, func_orig, module, enable); |
4585 | if (ret < 0) |
4586 | return ret; |
4587 | return 0; |
4588 | } |
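| /* |
|  * For example, "echo '*:mod:ext4' > set_ftrace_filter" limits tracing to |
|  * ext4 functions, and "echo '!*:mod:ext4' >> set_ftrace_filter" removes |
|  * them again (or drops the cached entry if ext4 was never loaded). |
|  */ |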
4589 | |
4590 | static struct ftrace_func_command ftrace_mod_cmd = { |
4591 | .name = "mod", |
4592 | .func = ftrace_mod_callback, |
4593 | }; |
4594 | |
4595 | static int __init ftrace_mod_cmd_init(void) |
4596 | { |
4597 | return register_ftrace_command(&ftrace_mod_cmd); |
4598 | } |
4599 | core_initcall(ftrace_mod_cmd_init); |
4600 | |
4601 | static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, |
4602 | struct ftrace_ops *op, struct ftrace_regs *fregs) |
4603 | { |
4604 | struct ftrace_probe_ops *probe_ops; |
4605 | struct ftrace_func_probe *probe; |
4606 | |
4607 | probe = container_of(op, struct ftrace_func_probe, ops); |
4608 | probe_ops = probe->probe_ops; |
4609 | |
4610 | /* |
4611 | * Disable preemption for these calls to prevent a RCU grace |
4612 | * period. This syncs the hash iteration and freeing of items |
4613 | * on the hash. rcu_read_lock is too dangerous here. |
4614 | */ |
4615 | preempt_disable_notrace(); |
4616 | probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data); |
4617 | preempt_enable_notrace(); |
4618 | } |
4619 | |
4620 | struct ftrace_func_map { |
4621 | struct ftrace_func_entry entry; |
4622 | void *data; |
4623 | }; |
4624 | |
4625 | struct ftrace_func_mapper { |
4626 | struct ftrace_hash hash; |
4627 | }; |
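| /* |
|  * A ftrace_func_map embeds a ftrace_func_entry as its first member, so an |
|  * entry returned by ftrace_lookup_ip() on the mapper hash can simply be |
|  * cast to a ftrace_func_map to reach the data attached to that ip. |
|  */ |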
4628 | |
4629 | /** |
4630 | * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper |
4631 | * |
4632 | * Returns: a ftrace_func_mapper descriptor that can be used to map ips to data. |
4633 | */ |
4634 | struct ftrace_func_mapper *allocate_ftrace_func_mapper(void) |
4635 | { |
4636 | struct ftrace_hash *hash; |
4637 | |
4638 | /* |
4639 | * The mapper is simply a ftrace_hash, but since the entries |
4640 | * in the hash are not ftrace_func_entry type, we define it |
4641 | * as a separate structure. |
4642 | */ |
4643 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); |
4644 | return (struct ftrace_func_mapper *)hash; |
4645 | } |
4646 | |
4647 | /** |
4648 | * ftrace_func_mapper_find_ip - Find some data mapped to an ip |
4649 | * @mapper: The mapper that has the ip maps |
4650 | * @ip: the instruction pointer to find the data for |
4651 | * |
4652 | * Returns: the data mapped to @ip if found, otherwise NULL. The return |
4653 | * is actually the address of the mapper data pointer. The address is |
4654 | * returned for use cases where the data is no bigger than a long, and |
4655 | * the user can use the data pointer as its data instead of having to |
4656 | * allocate more memory for the reference. |
4657 | */ |
4658 | void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper, |
4659 | unsigned long ip) |
4660 | { |
4661 | struct ftrace_func_entry *entry; |
4662 | struct ftrace_func_map *map; |
4663 | |
4664 | entry = ftrace_lookup_ip(&mapper->hash, ip); |
4665 | if (!entry) |
4666 | return NULL; |
4667 | |
4668 | map = (struct ftrace_func_map *)entry; |
4669 | return &map->data; |
4670 | } |
4671 | |
4672 | /** |
4673 | * ftrace_func_mapper_add_ip - Map some data to an ip |
4674 | * @mapper: The mapper that has the ip maps |
4675 | * @ip: The instruction pointer address to map @data to |
4676 | * @data: The data to map to @ip |
4677 | * |
4678 | * Returns: 0 on success otherwise an error. |
4679 | */ |
4680 | int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper, |
4681 | unsigned long ip, void *data) |
4682 | { |
4683 | struct ftrace_func_entry *entry; |
4684 | struct ftrace_func_map *map; |
4685 | |
4686 | entry = ftrace_lookup_ip(&mapper->hash, ip); |
4687 | if (entry) |
4688 | return -EBUSY; |
4689 | |
4690 | map = kmalloc(sizeof(*map), GFP_KERNEL); |
4691 | if (!map) |
4692 | return -ENOMEM; |
4693 | |
4694 | map->entry.ip = ip; |
4695 | map->data = data; |
4696 | |
4697 | __add_hash_entry(&mapper->hash, &map->entry); |
4698 | |
4699 | return 0; |
4700 | } |
4701 | |
4702 | /** |
4703 | * ftrace_func_mapper_remove_ip - Remove an ip from the mapping |
4704 | * @mapper: The mapper that has the ip maps |
4705 | * @ip: The instruction pointer address to remove the data from |
4706 | * |
4707 | * Returns: the data if it is found, otherwise NULL. |
4708 | * Note, if the data pointer is used as the data itself (see |
4709 | * ftrace_func_mapper_find_ip()), then the return value may be meaningless |
4710 | * if the data pointer was set to zero. |
4711 | */ |
4712 | void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper, |
4713 | unsigned long ip) |
4714 | { |
4715 | struct ftrace_func_entry *entry; |
4716 | struct ftrace_func_map *map; |
4717 | void *data; |
4718 | |
4719 | entry = ftrace_lookup_ip(&mapper->hash, ip); |
4720 | if (!entry) |
4721 | return NULL; |
4722 | |
4723 | map = (struct ftrace_func_map *)entry; |
4724 | data = map->data; |
4725 | |
4726 | remove_hash_entry(&mapper->hash, entry); |
4727 | kfree(entry); |
4728 | |
4729 | return data; |
4730 | } |
4731 | |
4732 | /** |
4733 | * free_ftrace_func_mapper - free a mapping of ips and data |
4734 | * @mapper: The mapper that has the ip maps |
4735 | * @free_func: A function to be called on each data item. |
4736 | * |
4737 | * This is used to free the function mapper. The @free_func is optional |
4738 | * and can be used if the data needs to be freed as well. |
4739 | */ |
4740 | void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper, |
4741 | ftrace_mapper_func free_func) |
4742 | { |
4743 | struct ftrace_func_entry *entry; |
4744 | struct ftrace_func_map *map; |
4745 | struct hlist_head *hhd; |
4746 | int size, i; |
4747 | |
4748 | if (!mapper) |
4749 | return; |
4750 | |
4751 | if (free_func && mapper->hash.count) { |
4752 | size = 1 << mapper->hash.size_bits; |
4753 | for (i = 0; i < size; i++) { |
4754 | hhd = &mapper->hash.buckets[i]; |
4755 | hlist_for_each_entry(entry, hhd, hlist) { |
4756 | map = (struct ftrace_func_map *)entry; |
4757 | free_func(map); |
4758 | } |
4759 | } |
4760 | } |
4761 | free_ftrace_hash(&mapper->hash); |
4762 | } |
4763 | |
4764 | static void release_probe(struct ftrace_func_probe *probe) |
4765 | { |
4766 | struct ftrace_probe_ops *probe_ops; |
4767 | |
4768 | mutex_lock(&ftrace_lock); |
4769 | |
4770 | WARN_ON(probe->ref <= 0); |
4771 | |
4772 | /* Subtract the ref that was used to protect this instance */ |
4773 | probe->ref--; |
4774 | |
4775 | if (!probe->ref) { |
4776 | probe_ops = probe->probe_ops; |
4777 | /* |
4778 | * Sending zero as ip tells probe_ops to free |
4779 | * the probe->data itself |
4780 | */ |
4781 | if (probe_ops->free) |
4782 | probe_ops->free(probe_ops, probe->tr, 0, probe->data); |
4783 | list_del(&probe->list); |
4784 | kfree(probe); |
4785 | } |
4786 | mutex_unlock(&ftrace_lock); |
4787 | } |
4788 | |
4789 | static void acquire_probe_locked(struct ftrace_func_probe *probe) |
4790 | { |
4791 | /* |
4792 | * Add one ref to keep it from being freed when releasing the |
4793 | * ftrace_lock mutex. |
4794 | */ |
4795 | probe->ref++; |
4796 | } |
4797 | |
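| /* |
|  * register_ftrace_function_probe() attaches @probe_ops to every function |
|  * matching @glob: reuse (or allocate) the ftrace_func_probe for this |
|  * probe_ops, copy its filter hash, call ->init() for each new ip, then |
|  * move the updated hash in place and start the ops if not yet enabled. |
|  */ |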
4798 | int |
4799 | register_ftrace_function_probe(char *glob, struct trace_array *tr, |
4800 | struct ftrace_probe_ops *probe_ops, |
4801 | void *data) |
4802 | { |
4803 | struct ftrace_func_probe *probe = NULL, *iter; |
4804 | struct ftrace_func_entry *entry; |
4805 | struct ftrace_hash **orig_hash; |
4806 | struct ftrace_hash *old_hash; |
4807 | struct ftrace_hash *hash; |
4808 | int count = 0; |
4809 | int size; |
4810 | int ret; |
4811 | int i; |
4812 | |
4813 | if (WARN_ON(!tr)) |
4814 | return -EINVAL; |
4815 | |
4816 | /* We do not support '!' for function probes */ |
4817 | if (WARN_ON(glob[0] == '!')) |
4818 | return -EINVAL; |
4819 | |
4820 | |
4821 | mutex_lock(&ftrace_lock); |
4822 | /* Check if the probe_ops is already registered */ |
4823 | list_for_each_entry(iter, &tr->func_probes, list) { |
4824 | if (iter->probe_ops == probe_ops) { |
4825 | probe = iter; |
4826 | break; |
4827 | } |
4828 | } |
4829 | if (!probe) { |
4830 | probe = kzalloc(sizeof(*probe), GFP_KERNEL); |
4831 | if (!probe) { |
4832 | mutex_unlock(&ftrace_lock); |
4833 | return -ENOMEM; |
4834 | } |
4835 | probe->probe_ops = probe_ops; |
4836 | probe->ops.func = function_trace_probe_call; |
4837 | probe->tr = tr; |
4838 | ftrace_ops_init(&probe->ops); |
4839 | list_add(&probe->list, &tr->func_probes); |
4840 | } |
4841 | |
4842 | acquire_probe_locked(probe); |
4843 | |
4844 | mutex_unlock(&ftrace_lock); |
4845 | |
4846 | /* |
4847 | * Note, there's a small window here that the func_hash->filter_hash |
4848 | * may be NULL or empty. Need to be careful when reading the loop. |
4849 | */ |
4850 | mutex_lock(&probe->ops.func_hash->regex_lock); |
4851 | |
4852 | orig_hash = &probe->ops.func_hash->filter_hash; |
4853 | old_hash = *orig_hash; |
4854 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); |
4855 | |
4856 | if (!hash) { |
4857 | ret = -ENOMEM; |
4858 | goto out; |
4859 | } |
4860 | |
4861 | ret = ftrace_match_records(hash, glob, strlen(glob)); |
4862 | |
4863 | /* Nothing found? */ |
4864 | if (!ret) |
4865 | ret = -EINVAL; |
4866 | |
4867 | if (ret < 0) |
4868 | goto out; |
4869 | |
4870 | size = 1 << hash->size_bits; |
4871 | for (i = 0; i < size; i++) { |
4872 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { |
4873 | if (ftrace_lookup_ip(old_hash, entry->ip)) |
4874 | continue; |
4875 | /* |
4876 | * The caller might want to do something special |
4877 | * for each function we find. We call the callback |
4878 | * to give the caller an opportunity to do so. |
4879 | */ |
4880 | if (probe_ops->init) { |
4881 | ret = probe_ops->init(probe_ops, tr, |
4882 | entry->ip, data, |
4883 | &probe->data); |
4884 | if (ret < 0) { |
4885 | if (probe_ops->free && count) |
4886 | probe_ops->free(probe_ops, tr, |
4887 | 0, probe->data); |
4888 | probe->data = NULL; |
4889 | goto out; |
4890 | } |
4891 | } |
4892 | count++; |
4893 | } |
4894 | } |
4895 | |
4896 | mutex_lock(&ftrace_lock); |
4897 | |
4898 | if (!count) { |
4899 | /* Nothing was added? */ |
4900 | ret = -EINVAL; |
4901 | goto out_unlock; |
4902 | } |
4903 | |
4904 | ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, |
4905 | hash, 1); |
4906 | if (ret < 0) |
4907 | goto err_unlock; |
4908 | |
4909 | /* One ref for each new function traced */ |
4910 | probe->ref += count; |
4911 | |
4912 | if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED)) |
4913 | ret = ftrace_startup(&probe->ops, 0); |
4914 | |
4915 | out_unlock: |
4916 | mutex_unlock(&ftrace_lock); |
4917 | |
4918 | if (!ret) |
4919 | ret = count; |
4920 | out: |
4921 | mutex_unlock(&probe->ops.func_hash->regex_lock); |
4922 | free_ftrace_hash(hash); |
4923 | |
4924 | release_probe(probe); |
4925 | |
4926 | return ret; |
4927 | |
4928 | err_unlock: |
4929 | if (!probe_ops->free || !count) |
4930 | goto out_unlock; |
4931 | |
4932 | /* Failed to do the move, need to call the free functions */ |
4933 | for (i = 0; i < size; i++) { |
4934 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { |
4935 | if (ftrace_lookup_ip(old_hash, entry->ip)) |
4936 | continue; |
4937 | probe_ops->free(probe_ops, tr, entry->ip, probe->data); |
4938 | } |
4939 | } |
4940 | goto out_unlock; |
4941 | } |
4942 | |
4943 | int |
4944 | unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, |
4945 | struct ftrace_probe_ops *probe_ops) |
4946 | { |
4947 | struct ftrace_func_probe *probe = NULL, *iter; |
4948 | struct ftrace_ops_hash old_hash_ops; |
4949 | struct ftrace_func_entry *entry; |
4950 | struct ftrace_glob func_g; |
4951 | struct ftrace_hash **orig_hash; |
4952 | struct ftrace_hash *old_hash; |
4953 | struct ftrace_hash *hash = NULL; |
4954 | struct hlist_node *tmp; |
4955 | struct hlist_head hhd; |
4956 | char str[KSYM_SYMBOL_LEN]; |
4957 | int count = 0; |
4958 | int i, ret = -ENODEV; |
4959 | int size; |
4960 | |
4961 | if (!glob || !strlen(glob) || !strcmp(glob, "*")) |
4962 | func_g.search = NULL; |
4963 | else { |
4964 | int not; |
4965 | |
4966 | func_g.type = filter_parse_regex(glob, strlen(glob), |
4967 | &func_g.search, &not); |
4968 | func_g.len = strlen(func_g.search); |
4969 | |
4970 | /* we do not support '!' for function probes */ |
4971 | if (WARN_ON(not)) |
4972 | return -EINVAL; |
4973 | } |
4974 | |
4975 | mutex_lock(&ftrace_lock); |
4976 | /* Check if the probe_ops is already registered */ |
4977 | list_for_each_entry(iter, &tr->func_probes, list) { |
4978 | if (iter->probe_ops == probe_ops) { |
4979 | probe = iter; |
4980 | break; |
4981 | } |
4982 | } |
4983 | if (!probe) |
4984 | goto err_unlock_ftrace; |
4985 | |
4986 | ret = -EINVAL; |
4987 | if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED)) |
4988 | goto err_unlock_ftrace; |
4989 | |
4990 | acquire_probe_locked(probe); |
4991 | |
4992 | mutex_unlock(&ftrace_lock); |
4993 | |
4994 | mutex_lock(&probe->ops.func_hash->regex_lock); |
4995 | |
4996 | orig_hash = &probe->ops.func_hash->filter_hash; |
4997 | old_hash = *orig_hash; |
4998 | |
4999 | if (ftrace_hash_empty(old_hash)) |
5000 | goto out_unlock; |
5001 | |
5002 | old_hash_ops.filter_hash = old_hash; |
5003 | /* Probes only have filters */ |
5004 | old_hash_ops.notrace_hash = NULL; |
5005 | |
5006 | ret = -ENOMEM; |
5007 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); |
5008 | if (!hash) |
5009 | goto out_unlock; |
5010 | |
5011 | INIT_HLIST_HEAD(&hhd); |
5012 | |
5013 | size = 1 << hash->size_bits; |
5014 | for (i = 0; i < size; i++) { |
5015 | hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { |
5016 | |
5017 | if (func_g.search) { |
5018 | kallsyms_lookup(entry->ip, NULL, NULL, |
5019 | NULL, str); |
5020 | if (!ftrace_match(str, &func_g)) |
5021 | continue; |
5022 | } |
5023 | count++; |
5024 | remove_hash_entry(hash, entry); |
5025 | hlist_add_head(&entry->hlist, &hhd); |
5026 | } |
5027 | } |
5028 | |
5029 | /* Nothing found? */ |
5030 | if (!count) { |
5031 | ret = -EINVAL; |
5032 | goto out_unlock; |
5033 | } |
5034 | |
5035 | mutex_lock(&ftrace_lock); |
5036 | |
5037 | WARN_ON(probe->ref < count); |
5038 | |
5039 | probe->ref -= count; |
5040 | |
5041 | if (ftrace_hash_empty(hash)) |
5042 | ftrace_shutdown(&probe->ops, 0); |
5043 | |
5044 | ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, |
5045 | hash, 1); |
5046 | |
5047 | /* still need to update the function call sites */ |
5048 | if (ftrace_enabled && !ftrace_hash_empty(hash)) |
5049 | ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, |
5050 | &old_hash_ops); |
5051 | synchronize_rcu(); |
5052 | |
5053 | hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { |
5054 | hlist_del(&entry->hlist); |
5055 | if (probe_ops->free) |
5056 | probe_ops->free(probe_ops, tr, entry->ip, probe->data); |
5057 | kfree(entry); |
5058 | } |
5059 | mutex_unlock(&ftrace_lock); |
5060 | |
5061 | out_unlock: |
5062 | mutex_unlock(&probe->ops.func_hash->regex_lock); |
5063 | free_ftrace_hash(hash); |
5064 | |
5065 | release_probe(probe); |
5066 | |
5067 | return ret; |
5068 | |
5069 | err_unlock_ftrace: |
5070 | mutex_unlock(&ftrace_lock); |
5071 | return ret; |
5072 | } |
5073 | |
5074 | void clear_ftrace_function_probes(struct trace_array *tr) |
5075 | { |
5076 | struct ftrace_func_probe *probe, *n; |
5077 | |
5078 | list_for_each_entry_safe(probe, n, &tr->func_probes, list) |
5079 | unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops); |
5080 | } |
5081 | |
5082 | static LIST_HEAD(ftrace_commands); |
5083 | static DEFINE_MUTEX(ftrace_cmd_mutex); |
5084 | |
5085 | /* |
5086 | * Currently we only register ftrace commands from __init, so mark this |
5087 | * __init too. |
5088 | */ |
5089 | __init int register_ftrace_command(struct ftrace_func_command *cmd) |
5090 | { |
5091 | struct ftrace_func_command *p; |
5092 | int ret = 0; |
5093 | |
5094 | mutex_lock(&ftrace_cmd_mutex); |
5095 | list_for_each_entry(p, &ftrace_commands, list) { |
5096 | if (strcmp(cmd->name, p->name) == 0) { |
5097 | ret = -EBUSY; |
5098 | goto out_unlock; |
5099 | } |
5100 | } |
5101 | list_add(&cmd->list, &ftrace_commands); |
5102 | out_unlock: |
5103 | mutex_unlock(&ftrace_cmd_mutex); |
5104 | |
5105 | return ret; |
5106 | } |
5107 | |
5108 | /* |
5109 | * Currently we only unregister ftrace commands from __init, so mark |
5110 | * this __init too. |
5111 | */ |
5112 | __init int unregister_ftrace_command(struct ftrace_func_command *cmd) |
5113 | { |
5114 | struct ftrace_func_command *p, *n; |
5115 | int ret = -ENODEV; |
5116 | |
5117 | mutex_lock(&ftrace_cmd_mutex); |
5118 | list_for_each_entry_safe(p, n, &ftrace_commands, list) { |
5119 | if (strcmp(cmd->name, p->name) == 0) { |
5120 | ret = 0; |
5121 | list_del_init(&p->list); |
5122 | goto out_unlock; |
5123 | } |
5124 | } |
5125 | out_unlock: |
5126 | mutex_unlock(&ftrace_cmd_mutex); |
5127 | |
5128 | return ret; |
5129 | } |
5130 | |
5131 | static int ftrace_process_regex(struct ftrace_iterator *iter, |
5132 | char *buff, int len, int enable) |
5133 | { |
5134 | struct ftrace_hash *hash = iter->hash; |
5135 | struct trace_array *tr = iter->ops->private; |
5136 | char *func, *command, *next = buff; |
5137 | struct ftrace_func_command *p; |
5138 | int ret = -EINVAL; |
5139 | |
5140 | func = strsep(&next, ":"); |
5141 | |
5142 | if (!next) { |
5143 | ret = ftrace_match_records(hash, func, len); |
5144 | if (!ret) |
5145 | ret = -EINVAL; |
5146 | if (ret < 0) |
5147 | return ret; |
5148 | return 0; |
5149 | } |
5150 | |
5151 | /* command found */ |
5152 | |
5153 | command = strsep(&next, ":"); |
5154 | |
5155 | mutex_lock(&ftrace_cmd_mutex); |
5156 | list_for_each_entry(p, &ftrace_commands, list) { |
5157 | if (strcmp(p->name, command) == 0) { |
5158 | ret = p->func(tr, hash, func, command, next, enable); |
5159 | goto out_unlock; |
5160 | } |
5161 | } |
5162 | out_unlock: |
5163 | mutex_unlock(&ftrace_cmd_mutex); |
5164 | |
5165 | return ret; |
5166 | } |
5167 | |
5168 | static ssize_t |
5169 | ftrace_regex_write(struct file *file, const char __user *ubuf, |
5170 | size_t cnt, loff_t *ppos, int enable) |
5171 | { |
5172 | struct ftrace_iterator *iter; |
5173 | struct trace_parser *parser; |
5174 | ssize_t ret, read; |
5175 | |
5176 | if (!cnt) |
5177 | return 0; |
5178 | |
5179 | if (file->f_mode & FMODE_READ) { |
5180 | struct seq_file *m = file->private_data; |
5181 | iter = m->private; |
5182 | } else |
5183 | iter = file->private_data; |
5184 | |
5185 | if (unlikely(ftrace_disabled)) |
5186 | return -ENODEV; |
5187 | |
5188 | /* iter->hash is a local copy, so we don't need regex_lock */ |
5189 | |
5190 | parser = &iter->parser; |
5191 | read = trace_get_user(parser, ubuf, cnt, ppos); |
5192 | |
5193 | if (read >= 0 && trace_parser_loaded(parser) && |
5194 | !trace_parser_cont(parser)) { |
5195 | ret = ftrace_process_regex(iter, parser->buffer, |
5196 | parser->idx, enable); |
5197 | trace_parser_clear(parser); |
5198 | if (ret < 0) |
5199 | goto out; |
5200 | } |
5201 | |
5202 | ret = read; |
5203 | out: |
5204 | return ret; |
5205 | } |
5206 | |
5207 | ssize_t |
5208 | ftrace_filter_write(struct file *file, const char __user *ubuf, |
5209 | size_t cnt, loff_t *ppos) |
5210 | { |
5211 | return ftrace_regex_write(file, ubuf, cnt, ppos, 1); |
5212 | } |
5213 | |
5214 | ssize_t |
5215 | ftrace_notrace_write(struct file *file, const char __user *ubuf, |
5216 | size_t cnt, loff_t *ppos) |
5217 | { |
5218 | return ftrace_regex_write(file, ubuf, cnt, ppos, 0); |
5219 | } |
5220 | |
5221 | static int |
5222 | __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) |
5223 | { |
5224 | struct ftrace_func_entry *entry; |
5225 | |
5226 | ip = ftrace_location(ip); |
5227 | if (!ip) |
5228 | return -EINVAL; |
5229 | |
5230 | if (remove) { |
5231 | entry = ftrace_lookup_ip(hash, ip); |
5232 | if (!entry) |
5233 | return -ENOENT; |
5234 | free_hash_entry(hash, entry); |
5235 | return 0; |
5236 | } |
5237 | |
5238 | entry = add_hash_entry(hash, ip); |
5239 | return entry ? 0 : -ENOMEM; |
5240 | } |
5241 | |
5242 | static int |
5243 | ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips, |
5244 | unsigned int cnt, int remove) |
5245 | { |
5246 | unsigned int i; |
5247 | int err; |
5248 | |
5249 | for (i = 0; i < cnt; i++) { |
5250 | err = __ftrace_match_addr(hash, ips[i], remove); |
5251 | if (err) { |
5252 | /* |
5253 | * This expects the @hash is a temporary hash and if this |
5254 | * fails the caller must free the @hash. |
5255 | */ |
5256 | return err; |
5257 | } |
5258 | } |
5259 | return 0; |
5260 | } |
5261 | |
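| /* |
|  * ftrace_set_hash() is the common backend for the filter update helpers: |
|  * it builds a new hash from @buf and/or @ips (optionally resetting the |
|  * old one) and swaps it into either the filter or the notrace hash of |
|  * @ops under ftrace_lock. |
|  */ |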
5262 | static int |
5263 | ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, |
5264 | unsigned long *ips, unsigned int cnt, |
5265 | int remove, int reset, int enable) |
5266 | { |
5267 | struct ftrace_hash **orig_hash; |
5268 | struct ftrace_hash *hash; |
5269 | int ret; |
5270 | |
5271 | if (unlikely(ftrace_disabled)) |
5272 | return -ENODEV; |
5273 | |
5274 | mutex_lock(&ops->func_hash->regex_lock); |
5275 | |
5276 | if (enable) |
5277 | orig_hash = &ops->func_hash->filter_hash; |
5278 | else |
5279 | orig_hash = &ops->func_hash->notrace_hash; |
5280 | |
5281 | if (reset) |
5282 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); |
5283 | else |
5284 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); |
5285 | |
5286 | if (!hash) { |
5287 | ret = -ENOMEM; |
5288 | goto out_regex_unlock; |
5289 | } |
5290 | |
5291 | if (buf && !ftrace_match_records(hash, buf, len)) { |
5292 | ret = -EINVAL; |
5293 | goto out_regex_unlock; |
5294 | } |
5295 | if (ips) { |
5296 | ret = ftrace_match_addr(hash, ips, cnt, remove); |
5297 | if (ret < 0) |
5298 | goto out_regex_unlock; |
5299 | } |
5300 | |
5301 | mutex_lock(&ftrace_lock); |
5302 | ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable); |
5303 | mutex_unlock(&ftrace_lock); |
5304 | |
5305 | out_regex_unlock: |
5306 | mutex_unlock(&ops->func_hash->regex_lock); |
5307 | |
5308 | free_ftrace_hash(hash); |
5309 | return ret; |
5310 | } |
5311 | |
5312 | static int |
5313 | ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt, |
5314 | int remove, int reset, int enable) |
5315 | { |
5316 | return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable); |
5317 | } |
5318 | |
5319 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS |
5320 | |
5321 | struct ftrace_direct_func { |
5322 | struct list_head next; |
5323 | unsigned long addr; |
5324 | int count; |
5325 | }; |
5326 | |
5327 | static LIST_HEAD(ftrace_direct_funcs); |
5328 | |
5329 | static int register_ftrace_function_nolock(struct ftrace_ops *ops); |
5330 | |
5331 | /* |
5332 | * If there are multiple ftrace_ops, use SAVE_REGS by default, so that direct |
5333 | * call will be jumped to from ftrace_regs_caller. Only if the architecture does |
5334 | * not support ftrace_regs_caller but direct_call, use SAVE_ARGS so that it |
5335 | * jumps from ftrace_caller for multiple ftrace_ops. |
5336 | */ |
5337 | #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS |
5338 | #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS) |
5339 | #else |
5340 | #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS) |
5341 | #endif |
5342 | |
5343 | static int check_direct_multi(struct ftrace_ops *ops) |
5344 | { |
5345 | if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) |
5346 | return -EINVAL; |
5347 | if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS) |
5348 | return -EINVAL; |
5349 | return 0; |
5350 | } |
5351 | |
5352 | static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr) |
5353 | { |
5354 | struct ftrace_func_entry *entry, *del; |
5355 | int size, i; |
5356 | |
5357 | size = 1 << hash->size_bits; |
5358 | for (i = 0; i < size; i++) { |
5359 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { |
5360 | del = __ftrace_lookup_ip(direct_functions, entry->ip); |
5361 | if (del && del->direct == addr) { |
5362 | remove_hash_entry(direct_functions, del); |
5363 | kfree(del); |
5364 | } |
5365 | } |
5366 | } |
5367 | } |
5368 | |
5369 | /** |
5370 | * register_ftrace_direct - Call a custom trampoline directly |
5371 | * for multiple functions registered in @ops |
5372 | * @ops: The address of the struct ftrace_ops object |
5373 | * @addr: The address of the trampoline to call at @ops functions |
5374 | * |
5375 | * This is used to connect direct calls to @addr from the nop locations |
5376 | * of the functions registered in @ops (set by the ftrace_set_filter_ip |
5377 | * function). |
5378 | * |
5379 | * The location that it calls (@addr) must be able to handle a direct call, |
5380 | * and save the parameters of the function being traced, and restore them |
5381 | * (or inject new ones if needed), before returning. |
5382 | * |
5383 | * Returns: |
5384 | * 0 on success |
5385 |  * -EINVAL - The @ops object was already registered with this call, or |
5386 |  *           there are no functions in the @ops object. |
5387 |  * -EBUSY - Another direct function is already attached (there can be only one) |
5388 |  * -ENODEV - A function in @ops is not at an ftrace nop location (or not supported) |
5389 | * -ENOMEM - There was an allocation failure. |
5390 | */ |
5391 | int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) |
5392 | { |
5393 | struct ftrace_hash *hash, *new_hash = NULL, *free_hash = NULL; |
5394 | struct ftrace_func_entry *entry, *new; |
5395 | int err = -EBUSY, size, i; |
5396 | |
5397 | if (ops->func || ops->trampoline) |
5398 | return -EINVAL; |
5399 | if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) |
5400 | return -EINVAL; |
5401 | if (ops->flags & FTRACE_OPS_FL_ENABLED) |
5402 | return -EINVAL; |
5403 | |
5404 | hash = ops->func_hash->filter_hash; |
5405 | if (ftrace_hash_empty(hash)) |
5406 | return -EINVAL; |
5407 | |
5408 | mutex_lock(&direct_mutex); |
5409 | |
5410 | 	/* Make sure the requested entries are not already registered. */ |
5411 | size = 1 << hash->size_bits; |
5412 | for (i = 0; i < size; i++) { |
5413 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { |
5414 | 			if (ftrace_find_rec_direct(entry->ip)) |
5415 | goto out_unlock; |
5416 | } |
5417 | } |
5418 | |
5419 | err = -ENOMEM; |
5420 | |
5421 | /* Make a copy hash to place the new and the old entries in */ |
5422 | size = hash->count + direct_functions->count; |
5423 | if (size > 32) |
5424 | size = 32; |
5425 | 	new_hash = alloc_ftrace_hash(fls(size)); |
5426 | if (!new_hash) |
5427 | goto out_unlock; |
5428 | |
5429 | /* Now copy over the existing direct entries */ |
5430 | size = 1 << direct_functions->size_bits; |
5431 | for (i = 0; i < size; i++) { |
5432 | hlist_for_each_entry(entry, &direct_functions->buckets[i], hlist) { |
5433 | 			new = add_hash_entry(new_hash, entry->ip); |
5434 | if (!new) |
5435 | goto out_unlock; |
5436 | new->direct = entry->direct; |
5437 | } |
5438 | } |
5439 | |
5440 | /* ... and add the new entries */ |
5441 | size = 1 << hash->size_bits; |
5442 | for (i = 0; i < size; i++) { |
5443 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { |
5444 | 			new = add_hash_entry(new_hash, entry->ip); |
5445 | if (!new) |
5446 | goto out_unlock; |
5447 | /* Update both the copy and the hash entry */ |
5448 | new->direct = addr; |
5449 | entry->direct = addr; |
5450 | } |
5451 | } |
5452 | |
5453 | free_hash = direct_functions; |
5454 | rcu_assign_pointer(direct_functions, new_hash); |
5455 | new_hash = NULL; |
5456 | |
5457 | ops->func = call_direct_funcs; |
5458 | ops->flags = MULTI_FLAGS; |
5459 | ops->trampoline = FTRACE_REGS_ADDR; |
5460 | ops->direct_call = addr; |
5461 | |
5462 | err = register_ftrace_function_nolock(ops); |
5463 | |
5464 | out_unlock: |
5465 | 	mutex_unlock(&direct_mutex); |
5466 | |
5467 | 	if (free_hash && free_hash != EMPTY_HASH) { |
5468 | 		synchronize_rcu_tasks(); |
5469 | 		free_ftrace_hash(free_hash); |
5470 | 	} |
5471 | |
5472 | 	if (new_hash) |
5473 | 		free_ftrace_hash(new_hash); |
5474 | |
5475 | return err; |
5476 | } |
5477 | EXPORT_SYMBOL_GPL(register_ftrace_direct); |
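/*
 * A minimal usage sketch (not compiled here; "my_tramp" and "my_traced_func"
 * are hypothetical names): the caller first puts the functions of interest
 * into the ops filter, then attaches the direct trampoline.  The trampoline
 * itself is architecture-specific assembly that saves and restores the
 * traced function's arguments around whatever work it does:
 *
 *	static struct ftrace_ops direct_ops;
 *	extern void my_tramp(void);
 *
 *	static int __init my_direct_init(void)
 *	{
 *		int ret;
 *
 *		ret = ftrace_set_filter_ip(&direct_ops, (unsigned long)my_traced_func, 0, 0);
 *		if (ret)
 *			return ret;
 *
 *		return register_ftrace_direct(&direct_ops, (unsigned long)my_tramp);
 *	}
 *
 * See samples/ftrace/ftrace-direct*.c for complete examples, including the
 * trampoline assembly.
 */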
5478 | |
5479 | /** |
5480 | * unregister_ftrace_direct - Remove calls to custom trampoline |
5481 | * previously registered by register_ftrace_direct for @ops object. |
5482 | * @ops: The address of the struct ftrace_ops object |
5483 | * |
5484 |  * This is used to remove the direct call to @addr from the nop locations |
5485 |  * of the functions registered in @ops (set by the ftrace_set_filter_ip() |
5486 |  * function). |
5487 | * |
5488 | * Returns: |
5489 | * 0 on success |
5490 | * -EINVAL - The @ops object was not properly registered. |
5491 | */ |
5492 | int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr, |
5493 | bool free_filters) |
5494 | { |
5495 | struct ftrace_hash *hash = ops->func_hash->filter_hash; |
5496 | int err; |
5497 | |
5498 | if (check_direct_multi(ops)) |
5499 | return -EINVAL; |
5500 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) |
5501 | return -EINVAL; |
5502 | |
5503 | mutex_lock(&direct_mutex); |
5504 | err = unregister_ftrace_function(ops); |
5505 | remove_direct_functions_hash(hash, addr); |
5506 | mutex_unlock(lock: &direct_mutex); |
5507 | |
5508 | /* cleanup for possible another register call */ |
5509 | ops->func = NULL; |
5510 | ops->trampoline = 0; |
5511 | |
5512 | if (free_filters) |
5513 | ftrace_free_filter(ops); |
5514 | return err; |
5515 | } |
5516 | EXPORT_SYMBOL_GPL(unregister_ftrace_direct); |
5517 | |
5518 | static int |
5519 | __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) |
5520 | { |
5521 | struct ftrace_hash *hash; |
5522 | struct ftrace_func_entry *entry, *iter; |
5523 | static struct ftrace_ops tmp_ops = { |
5524 | .func = ftrace_stub, |
5525 | .flags = FTRACE_OPS_FL_STUB, |
5526 | }; |
5527 | int i, size; |
5528 | int err; |
5529 | |
5530 | lockdep_assert_held_once(&direct_mutex); |
5531 | |
5532 | /* Enable the tmp_ops to have the same functions as the direct ops */ |
5533 | ftrace_ops_init(ops: &tmp_ops); |
5534 | tmp_ops.func_hash = ops->func_hash; |
5535 | tmp_ops.direct_call = addr; |
5536 | |
5537 | err = register_ftrace_function_nolock(ops: &tmp_ops); |
5538 | if (err) |
5539 | return err; |
5540 | |
5541 | /* |
5542 | * Now the ftrace_ops_list_func() is called to do the direct callers. |
5543 | * We can safely change the direct functions attached to each entry. |
5544 | */ |
5545 | mutex_lock(&ftrace_lock); |
5546 | |
5547 | hash = ops->func_hash->filter_hash; |
5548 | size = 1 << hash->size_bits; |
5549 | for (i = 0; i < size; i++) { |
5550 | hlist_for_each_entry(iter, &hash->buckets[i], hlist) { |
5551 | entry = __ftrace_lookup_ip(hash: direct_functions, ip: iter->ip); |
5552 | if (!entry) |
5553 | continue; |
5554 | entry->direct = addr; |
5555 | } |
5556 | } |
5557 | /* Prevent store tearing if a trampoline concurrently accesses the value */ |
5558 | WRITE_ONCE(ops->direct_call, addr); |
5559 | |
5560 | mutex_unlock(lock: &ftrace_lock); |
5561 | |
5562 | /* Removing the tmp_ops will add the updated direct callers to the functions */ |
5563 | unregister_ftrace_function(ops: &tmp_ops); |
5564 | |
5565 | return err; |
5566 | } |
5567 | |
5568 | /** |
5569 | * modify_ftrace_direct_nolock - Modify an existing direct 'multi' call |
5570 | * to call something else |
5571 | * @ops: The address of the struct ftrace_ops object |
5572 | * @addr: The address of the new trampoline to call at @ops functions |
5573 | * |
5574 | * This is used to unregister currently registered direct caller and |
5575 | * register new one @addr on functions registered in @ops object. |
5576 | * |
5577 | * Note there's window between ftrace_shutdown and ftrace_startup calls |
5578 | * where there will be no callbacks called. |
5579 | * |
5580 | * Caller should already have direct_mutex locked, so we don't lock |
5581 | * direct_mutex here. |
5582 | * |
5583 | * Returns: zero on success. Non zero on error, which includes: |
5584 | * -EINVAL - The @ops object was not properly registered. |
5585 | */ |
5586 | int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr) |
5587 | { |
5588 | if (check_direct_multi(ops)) |
5589 | return -EINVAL; |
5590 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) |
5591 | return -EINVAL; |
5592 | |
5593 | return __modify_ftrace_direct(ops, addr); |
5594 | } |
5595 | EXPORT_SYMBOL_GPL(modify_ftrace_direct_nolock); |
5596 | |
5597 | /** |
5598 | * modify_ftrace_direct - Modify an existing direct 'multi' call |
5599 | * to call something else |
5600 | * @ops: The address of the struct ftrace_ops object |
5601 | * @addr: The address of the new trampoline to call at @ops functions |
5602 | * |
5603 |  * This is used to unregister the currently registered direct caller and |
5604 |  * register @addr as the new one for the functions registered in @ops. |
5605 |  * |
5606 |  * Note there is a window between the ftrace_shutdown and ftrace_startup |
5607 |  * calls in which no callbacks will be called. |
5608 | * |
5609 | * Returns: zero on success. Non zero on error, which includes: |
5610 | * -EINVAL - The @ops object was not properly registered. |
5611 | */ |
5612 | int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) |
5613 | { |
5614 | int err; |
5615 | |
5616 | if (check_direct_multi(ops)) |
5617 | return -EINVAL; |
5618 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) |
5619 | return -EINVAL; |
5620 | |
5621 | mutex_lock(&direct_mutex); |
5622 | err = __modify_ftrace_direct(ops, addr); |
5623 | mutex_unlock(lock: &direct_mutex); |
5624 | return err; |
5625 | } |
5626 | EXPORT_SYMBOL_GPL(modify_ftrace_direct); |
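/*
 * Sketch of retargeting a live direct ops ("direct_ops" and "my_tramp2" are
 * hypothetical): the attached functions keep being traced throughout, only
 * the trampoline that the direct call jumps to changes:
 *
 *	ret = modify_ftrace_direct(&direct_ops, (unsigned long)my_tramp2);
 *
 * Use modify_ftrace_direct_nolock() instead when the caller already holds
 * direct_mutex.
 */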
5627 | #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ |
5628 | |
5629 | /** |
5630 | * ftrace_set_filter_ip - set a function to filter on in ftrace by address |
5631 | * @ops: the ops to set the filter with |
5632 | * @ip: the address to add to or remove from the filter. |
5633 | * @remove: non zero to remove the ip from the filter |
5634 | * @reset: non zero to reset all filters before applying this filter. |
5635 | * |
5636 |  * Filters denote which functions should be enabled when tracing is enabled. |
5637 |  * If @ip is NULL, the filter is not updated. |
5638 | * |
5639 | * This can allocate memory which must be freed before @ops can be freed, |
5640 | * either by removing each filtered addr or by using |
5641 | * ftrace_free_filter(@ops). |
5642 | */ |
5643 | int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, |
5644 | int remove, int reset) |
5645 | { |
5646 | ftrace_ops_init(ops); |
5647 | 	return ftrace_set_addr(ops, &ip, 1, remove, reset, 1); |
5648 | } |
5649 | EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); |
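/*
 * Sketch of tracing a single function by address with a regular callback
 * ops (names are hypothetical).  Passing the function's address is normally
 * sufficient, as it is resolved to the corresponding mcount/fentry site:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_ops my_ops = { .func = my_callback };
 *
 *	ftrace_set_filter_ip(&my_ops, (unsigned long)my_traced_func, 0, 0);
 *	register_ftrace_function(&my_ops);
 */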
5650 | |
5651 | /** |
5652 | * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses |
5653 | * @ops: the ops to set the filter with |
5654 | * @ips: the array of addresses to add to or remove from the filter. |
5655 | * @cnt: the number of addresses in @ips |
5656 | * @remove: non zero to remove ips from the filter |
5657 | * @reset: non zero to reset all filters before applying this filter. |
5658 | * |
5659 |  * Filters denote which functions should be enabled when tracing is enabled. |
5660 |  * If the @ips array or any ip specified within is NULL, the filter is not updated. |
5661 | * |
5662 | * This can allocate memory which must be freed before @ops can be freed, |
5663 | * either by removing each filtered addr or by using |
5664 | * ftrace_free_filter(@ops). |
5665 | */ |
5666 | int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips, |
5667 | unsigned int cnt, int remove, int reset) |
5668 | { |
5669 | ftrace_ops_init(ops); |
5670 | 	return ftrace_set_addr(ops, ips, cnt, remove, reset, 1); |
5671 | } |
5672 | EXPORT_SYMBOL_GPL(ftrace_set_filter_ips); |
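/*
 * Sketch of the batched variant (addresses and "my_ops" hypothetical):
 * several entries are added under a single hash update, which is cheaper
 * than repeated ftrace_set_filter_ip() calls:
 *
 *	unsigned long ips[] = {
 *		(unsigned long)func_a,
 *		(unsigned long)func_b,
 *	};
 *
 *	ftrace_set_filter_ips(&my_ops, ips, ARRAY_SIZE(ips), 0, 1);
 */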
5673 | |
5674 | /** |
5675 | * ftrace_ops_set_global_filter - setup ops to use global filters |
5676 | * @ops: the ops which will use the global filters |
5677 | * |
5678 | * ftrace users who need global function trace filtering should call this. |
5679 | * It can set the global filter only if ops were not initialized before. |
5680 | */ |
5681 | void ftrace_ops_set_global_filter(struct ftrace_ops *ops) |
5682 | { |
5683 | if (ops->flags & FTRACE_OPS_FL_INITIALIZED) |
5684 | return; |
5685 | |
5686 | ftrace_ops_init(ops); |
5687 | ops->func_hash = &global_ops.local_hash; |
5688 | } |
5689 | EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter); |
5690 | |
5691 | static int |
5692 | ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, |
5693 | int reset, int enable) |
5694 | { |
5695 | 	return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable); |
5696 | } |
5697 | |
5698 | /** |
5699 | * ftrace_set_filter - set a function to filter on in ftrace |
5700 | * @ops: the ops to set the filter with |
5701 | * @buf: the string that holds the function filter text. |
5702 | * @len: the length of the string. |
5703 | * @reset: non-zero to reset all filters before applying this filter. |
5704 | * |
5705 | * Filters denote which functions should be enabled when tracing is enabled. |
5706 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. |
5707 | * |
5708 | * This can allocate memory which must be freed before @ops can be freed, |
5709 | * either by removing each filtered addr or by using |
5710 | * ftrace_free_filter(@ops). |
5711 | */ |
5712 | int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, |
5713 | int len, int reset) |
5714 | { |
5715 | ftrace_ops_init(ops); |
5716 | 	return ftrace_set_regex(ops, buf, len, reset, 1); |
5717 | } |
5718 | EXPORT_SYMBOL_GPL(ftrace_set_filter); |
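/*
 * Sketch of a text filter ("my_ops" is hypothetical).  The glob syntax is
 * the same as the tracefs set_ftrace_filter file, and @reset of 1 drops any
 * previously installed filter first:
 *
 *	char buf[] = "kmem_cache_*";
 *
 *	ftrace_set_filter(&my_ops, buf, strlen(buf), 1);
 *	register_ftrace_function(&my_ops);
 */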
5719 | |
5720 | /** |
5721 | * ftrace_set_notrace - set a function to not trace in ftrace |
5722 | * @ops: the ops to set the notrace filter with |
5723 | * @buf: the string that holds the function notrace text. |
5724 | * @len: the length of the string. |
5725 | * @reset: non-zero to reset all filters before applying this filter. |
5726 | * |
5727 | * Notrace Filters denote which functions should not be enabled when tracing |
5728 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled |
5729 | * for tracing. |
5730 | * |
5731 | * This can allocate memory which must be freed before @ops can be freed, |
5732 | * either by removing each filtered addr or by using |
5733 | * ftrace_free_filter(@ops). |
5734 | */ |
5735 | int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, |
5736 | int len, int reset) |
5737 | { |
5738 | ftrace_ops_init(ops); |
5739 | 	return ftrace_set_regex(ops, buf, len, reset, 0); |
5740 | } |
5741 | EXPORT_SYMBOL_GPL(ftrace_set_notrace); |
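/*
 * Sketch: a notrace pattern carves exceptions out of a filter installed
 * with ftrace_set_filter(), e.g. everything matching the filter except the
 * free path ("my_ops" hypothetical):
 *
 *	char skip[] = "kmem_cache_free";
 *
 *	ftrace_set_notrace(&my_ops, skip, strlen(skip), 0);
 */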
5742 | /** |
5743 | * ftrace_set_global_filter - set a function to filter on with global tracers |
5744 | * @buf: the string that holds the function filter text. |
5745 | * @len: the length of the string. |
5746 | * @reset: non-zero to reset all filters before applying this filter. |
5747 | * |
5748 | * Filters denote which functions should be enabled when tracing is enabled. |
5749 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. |
5750 | */ |
5751 | void ftrace_set_global_filter(unsigned char *buf, int len, int reset) |
5752 | { |
5753 | 	ftrace_set_regex(&global_ops, buf, len, reset, 1); |
5754 | } |
5755 | EXPORT_SYMBOL_GPL(ftrace_set_global_filter); |
5756 | |
5757 | /** |
5758 | * ftrace_set_global_notrace - set a function to not trace with global tracers |
5759 | * @buf: the string that holds the function notrace text. |
5760 | * @len: the length of the string. |
5761 | * @reset: non-zero to reset all filters before applying this filter. |
5762 | * |
5763 | * Notrace Filters denote which functions should not be enabled when tracing |
5764 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled |
5765 | * for tracing. |
5766 | */ |
5767 | void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) |
5768 | { |
5769 | 	ftrace_set_regex(&global_ops, buf, len, reset, 0); |
5770 | } |
5771 | EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); |
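/*
 * Sketch: the global variants operate on global_ops, which the function
 * tracer itself uses, so they restrict what the "function" tracer will
 * trace system-wide:
 *
 *	char buf[] = "vfs_*";
 *
 *	ftrace_set_global_filter(buf, strlen(buf), 1);
 */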
5772 | |
5773 | /* |
5774 | * command line interface to allow users to set filters on boot up. |
5775 | */ |
5776 | #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE |
5777 | static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; |
5778 | static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; |
5779 | |
5780 | /* Used by function selftest to not test if filter is set */ |
5781 | bool ftrace_filter_param __initdata; |
5782 | |
5783 | static int __init set_ftrace_notrace(char *str) |
5784 | { |
5785 | ftrace_filter_param = true; |
5786 | strscpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); |
5787 | return 1; |
5788 | } |
5789 | __setup("ftrace_notrace=", set_ftrace_notrace); |
5790 | |
5791 | static int __init set_ftrace_filter(char *str) |
5792 | { |
5793 | ftrace_filter_param = true; |
5794 | strscpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); |
5795 | return 1; |
5796 | } |
5797 | __setup("ftrace_filter=", set_ftrace_filter); |
5798 | |
5799 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
5800 | static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; |
5801 | static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; |
5802 | static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer); |
5803 | |
5804 | static int __init set_graph_function(char *str) |
5805 | { |
5806 | strscpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); |
5807 | return 1; |
5808 | } |
5809 | __setup("ftrace_graph_filter=", set_graph_function); |
5810 | |
5811 | static int __init set_graph_notrace_function(char *str) |
5812 | { |
5813 | strscpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE); |
5814 | return 1; |
5815 | } |
5816 | __setup("ftrace_graph_notrace=", set_graph_notrace_function); |
5817 | |
5818 | static int __init set_graph_max_depth_function(char *str) |
5819 | { |
5820 | if (!str) |
5821 | return 0; |
5822 | fgraph_max_depth = simple_strtoul(str, NULL, 0); |
5823 | return 1; |
5824 | } |
5825 | __setup("ftrace_graph_max_depth=", set_graph_max_depth_function); |
5826 | |
5827 | static void __init set_ftrace_early_graph(char *buf, int enable) |
5828 | { |
5829 | int ret; |
5830 | char *func; |
5831 | struct ftrace_hash *hash; |
5832 | |
5833 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); |
5834 | if (MEM_FAIL(!hash, "Failed to allocate hash\n")) |
5835 | return; |
5836 | |
5837 | while (buf) { |
5838 | func = strsep(&buf, ","); |
5839 | /* we allow only one expression at a time */ |
5840 | 		ret = ftrace_graph_set_hash(hash, func); |
5841 | if (ret) |
5842 | 			printk(KERN_DEBUG "ftrace: function %s not traceable\n", func); |
5844 | } |
5845 | |
5846 | if (enable) |
5847 | ftrace_graph_hash = hash; |
5848 | else |
5849 | ftrace_graph_notrace_hash = hash; |
5850 | } |
5851 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
5852 | |
5853 | void __init |
5854 | ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable) |
5855 | { |
5856 | char *func; |
5857 | |
5858 | ftrace_ops_init(ops); |
5859 | |
5860 | while (buf) { |
5861 | func = strsep(&buf, ","); |
5862 | 		ftrace_set_regex(ops, func, strlen(func), 0, enable); |
5863 | } |
5864 | } |
5865 | |
5866 | static void __init set_ftrace_early_filters(void) |
5867 | { |
5868 | 	if (ftrace_filter_buf[0]) |
5869 | 		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1); |
5870 | 	if (ftrace_notrace_buf[0]) |
5871 | 		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0); |
5872 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
5873 | 	if (ftrace_graph_buf[0]) |
5874 | 		set_ftrace_early_graph(ftrace_graph_buf, 1); |
5875 | 	if (ftrace_graph_notrace_buf[0]) |
5876 | 		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0); |
5877 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
5878 | } |
5879 | |
5880 | int ftrace_regex_release(struct inode *inode, struct file *file) |
5881 | { |
5882 | struct seq_file *m = (struct seq_file *)file->private_data; |
5883 | struct ftrace_iterator *iter; |
5884 | struct ftrace_hash **orig_hash; |
5885 | struct trace_parser *parser; |
5886 | int filter_hash; |
5887 | |
5888 | if (file->f_mode & FMODE_READ) { |
5889 | iter = m->private; |
5890 | seq_release(inode, file); |
5891 | } else |
5892 | iter = file->private_data; |
5893 | |
5894 | parser = &iter->parser; |
5895 | if (trace_parser_loaded(parser)) { |
5896 | int enable = !(iter->flags & FTRACE_ITER_NOTRACE); |
5897 | |
5898 | ftrace_process_regex(iter, buff: parser->buffer, |
5899 | len: parser->idx, enable); |
5900 | } |
5901 | |
5902 | trace_parser_put(parser); |
5903 | |
5904 | mutex_lock(&iter->ops->func_hash->regex_lock); |
5905 | |
5906 | if (file->f_mode & FMODE_WRITE) { |
5907 | filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); |
5908 | |
5909 | if (filter_hash) { |
5910 | orig_hash = &iter->ops->func_hash->filter_hash; |
5911 | if (iter->tr) { |
5912 | if (list_empty(head: &iter->tr->mod_trace)) |
5913 | iter->hash->flags &= ~FTRACE_HASH_FL_MOD; |
5914 | else |
5915 | iter->hash->flags |= FTRACE_HASH_FL_MOD; |
5916 | } |
5917 | } else |
5918 | orig_hash = &iter->ops->func_hash->notrace_hash; |
5919 | |
5920 | mutex_lock(&ftrace_lock); |
5921 | ftrace_hash_move_and_update_ops(ops: iter->ops, orig_hash, |
5922 | hash: iter->hash, enable: filter_hash); |
5923 | mutex_unlock(lock: &ftrace_lock); |
5924 | } else { |
5925 | /* For read only, the hash is the ops hash */ |
5926 | iter->hash = NULL; |
5927 | } |
5928 | |
5929 | mutex_unlock(lock: &iter->ops->func_hash->regex_lock); |
5930 | free_ftrace_hash(hash: iter->hash); |
5931 | if (iter->tr) |
5932 | trace_array_put(tr: iter->tr); |
5933 | kfree(objp: iter); |
5934 | |
5935 | return 0; |
5936 | } |
5937 | |
5938 | static const struct file_operations ftrace_avail_fops = { |
5939 | .open = ftrace_avail_open, |
5940 | .read = seq_read, |
5941 | .llseek = seq_lseek, |
5942 | .release = seq_release_private, |
5943 | }; |
5944 | |
5945 | static const struct file_operations ftrace_enabled_fops = { |
5946 | .open = ftrace_enabled_open, |
5947 | .read = seq_read, |
5948 | .llseek = seq_lseek, |
5949 | .release = seq_release_private, |
5950 | }; |
5951 | |
5952 | static const struct file_operations ftrace_touched_fops = { |
5953 | .open = ftrace_touched_open, |
5954 | .read = seq_read, |
5955 | .llseek = seq_lseek, |
5956 | .release = seq_release_private, |
5957 | }; |
5958 | |
5959 | static const struct file_operations ftrace_avail_addrs_fops = { |
5960 | .open = ftrace_avail_addrs_open, |
5961 | .read = seq_read, |
5962 | .llseek = seq_lseek, |
5963 | .release = seq_release_private, |
5964 | }; |
5965 | |
5966 | static const struct file_operations ftrace_filter_fops = { |
5967 | .open = ftrace_filter_open, |
5968 | .read = seq_read, |
5969 | .write = ftrace_filter_write, |
5970 | .llseek = tracing_lseek, |
5971 | .release = ftrace_regex_release, |
5972 | }; |
5973 | |
5974 | static const struct file_operations ftrace_notrace_fops = { |
5975 | .open = ftrace_notrace_open, |
5976 | .read = seq_read, |
5977 | .write = ftrace_notrace_write, |
5978 | .llseek = tracing_lseek, |
5979 | .release = ftrace_regex_release, |
5980 | }; |
5981 | |
5982 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
5983 | |
5984 | static DEFINE_MUTEX(graph_lock); |
5985 | |
5986 | struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH; |
5987 | struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH; |
5988 | |
5989 | enum graph_filter_type { |
5990 | GRAPH_FILTER_NOTRACE = 0, |
5991 | GRAPH_FILTER_FUNCTION, |
5992 | }; |
5993 | |
5994 | #define FTRACE_GRAPH_EMPTY ((void *)1) |
5995 | |
5996 | struct ftrace_graph_data { |
5997 | struct ftrace_hash *hash; |
5998 | struct ftrace_func_entry *entry; |
5999 | int idx; /* for hash table iteration */ |
6000 | enum graph_filter_type type; |
6001 | struct ftrace_hash *new_hash; |
6002 | const struct seq_operations *seq_ops; |
6003 | struct trace_parser parser; |
6004 | }; |
6005 | |
6006 | static void * |
6007 | __g_next(struct seq_file *m, loff_t *pos) |
6008 | { |
6009 | struct ftrace_graph_data *fgd = m->private; |
6010 | struct ftrace_func_entry *entry = fgd->entry; |
6011 | struct hlist_head *head; |
6012 | int i, idx = fgd->idx; |
6013 | |
6014 | if (*pos >= fgd->hash->count) |
6015 | return NULL; |
6016 | |
6017 | if (entry) { |
6018 | hlist_for_each_entry_continue(entry, hlist) { |
6019 | fgd->entry = entry; |
6020 | return entry; |
6021 | } |
6022 | |
6023 | idx++; |
6024 | } |
6025 | |
6026 | for (i = idx; i < 1 << fgd->hash->size_bits; i++) { |
6027 | head = &fgd->hash->buckets[i]; |
6028 | hlist_for_each_entry(entry, head, hlist) { |
6029 | fgd->entry = entry; |
6030 | fgd->idx = i; |
6031 | return entry; |
6032 | } |
6033 | } |
6034 | return NULL; |
6035 | } |
6036 | |
6037 | static void * |
6038 | g_next(struct seq_file *m, void *v, loff_t *pos) |
6039 | { |
6040 | (*pos)++; |
6041 | return __g_next(m, pos); |
6042 | } |
6043 | |
6044 | static void *g_start(struct seq_file *m, loff_t *pos) |
6045 | { |
6046 | struct ftrace_graph_data *fgd = m->private; |
6047 | |
6048 | mutex_lock(&graph_lock); |
6049 | |
6050 | if (fgd->type == GRAPH_FILTER_FUNCTION) |
6051 | fgd->hash = rcu_dereference_protected(ftrace_graph_hash, |
6052 | lockdep_is_held(&graph_lock)); |
6053 | else |
6054 | fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, |
6055 | lockdep_is_held(&graph_lock)); |
6056 | |
6057 | /* Nothing, tell g_show to print all functions are enabled */ |
6058 | if (ftrace_hash_empty(hash: fgd->hash) && !*pos) |
6059 | return FTRACE_GRAPH_EMPTY; |
6060 | |
6061 | fgd->idx = 0; |
6062 | fgd->entry = NULL; |
6063 | return __g_next(m, pos); |
6064 | } |
6065 | |
6066 | static void g_stop(struct seq_file *m, void *p) |
6067 | { |
6068 | mutex_unlock(lock: &graph_lock); |
6069 | } |
6070 | |
6071 | static int g_show(struct seq_file *m, void *v) |
6072 | { |
6073 | struct ftrace_func_entry *entry = v; |
6074 | |
6075 | if (!entry) |
6076 | return 0; |
6077 | |
6078 | if (entry == FTRACE_GRAPH_EMPTY) { |
6079 | struct ftrace_graph_data *fgd = m->private; |
6080 | |
6081 | if (fgd->type == GRAPH_FILTER_FUNCTION) |
6082 | seq_puts(m, s: "#### all functions enabled ####\n"); |
6083 | else |
6084 | seq_puts(m, s: "#### no functions disabled ####\n"); |
6085 | return 0; |
6086 | } |
6087 | |
6088 | seq_printf(m, fmt: "%ps\n", (void *)entry->ip); |
6089 | |
6090 | return 0; |
6091 | } |
6092 | |
6093 | static const struct seq_operations ftrace_graph_seq_ops = { |
6094 | .start = g_start, |
6095 | .next = g_next, |
6096 | .stop = g_stop, |
6097 | .show = g_show, |
6098 | }; |
6099 | |
6100 | static int |
6101 | __ftrace_graph_open(struct inode *inode, struct file *file, |
6102 | struct ftrace_graph_data *fgd) |
6103 | { |
6104 | int ret; |
6105 | struct ftrace_hash *new_hash = NULL; |
6106 | |
6107 | ret = security_locked_down(what: LOCKDOWN_TRACEFS); |
6108 | if (ret) |
6109 | return ret; |
6110 | |
6111 | if (file->f_mode & FMODE_WRITE) { |
6112 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; |
6113 | |
6114 | if (trace_parser_get_init(parser: &fgd->parser, FTRACE_BUFF_MAX)) |
6115 | return -ENOMEM; |
6116 | |
6117 | if (file->f_flags & O_TRUNC) |
6118 | new_hash = alloc_ftrace_hash(size_bits); |
6119 | else |
6120 | new_hash = alloc_and_copy_ftrace_hash(size_bits, |
6121 | hash: fgd->hash); |
6122 | if (!new_hash) { |
6123 | ret = -ENOMEM; |
6124 | goto out; |
6125 | } |
6126 | } |
6127 | |
6128 | if (file->f_mode & FMODE_READ) { |
6129 | ret = seq_open(file, &ftrace_graph_seq_ops); |
6130 | if (!ret) { |
6131 | struct seq_file *m = file->private_data; |
6132 | m->private = fgd; |
6133 | } else { |
6134 | /* Failed */ |
6135 | free_ftrace_hash(hash: new_hash); |
6136 | new_hash = NULL; |
6137 | } |
6138 | } else |
6139 | file->private_data = fgd; |
6140 | |
6141 | out: |
6142 | if (ret < 0 && file->f_mode & FMODE_WRITE) |
6143 | trace_parser_put(parser: &fgd->parser); |
6144 | |
6145 | fgd->new_hash = new_hash; |
6146 | |
6147 | /* |
6148 | * All uses of fgd->hash must be taken with the graph_lock |
6149 | * held. The graph_lock is going to be released, so force |
6150 | * fgd->hash to be reinitialized when it is taken again. |
6151 | */ |
6152 | fgd->hash = NULL; |
6153 | |
6154 | return ret; |
6155 | } |
6156 | |
6157 | static int |
6158 | ftrace_graph_open(struct inode *inode, struct file *file) |
6159 | { |
6160 | struct ftrace_graph_data *fgd; |
6161 | int ret; |
6162 | |
6163 | if (unlikely(ftrace_disabled)) |
6164 | return -ENODEV; |
6165 | |
6166 | fgd = kmalloc(size: sizeof(*fgd), GFP_KERNEL); |
6167 | if (fgd == NULL) |
6168 | return -ENOMEM; |
6169 | |
6170 | mutex_lock(&graph_lock); |
6171 | |
6172 | fgd->hash = rcu_dereference_protected(ftrace_graph_hash, |
6173 | lockdep_is_held(&graph_lock)); |
6174 | fgd->type = GRAPH_FILTER_FUNCTION; |
6175 | fgd->seq_ops = &ftrace_graph_seq_ops; |
6176 | |
6177 | ret = __ftrace_graph_open(inode, file, fgd); |
6178 | if (ret < 0) |
6179 | kfree(objp: fgd); |
6180 | |
6181 | mutex_unlock(lock: &graph_lock); |
6182 | return ret; |
6183 | } |
6184 | |
6185 | static int |
6186 | ftrace_graph_notrace_open(struct inode *inode, struct file *file) |
6187 | { |
6188 | struct ftrace_graph_data *fgd; |
6189 | int ret; |
6190 | |
6191 | if (unlikely(ftrace_disabled)) |
6192 | return -ENODEV; |
6193 | |
6194 | fgd = kmalloc(size: sizeof(*fgd), GFP_KERNEL); |
6195 | if (fgd == NULL) |
6196 | return -ENOMEM; |
6197 | |
6198 | mutex_lock(&graph_lock); |
6199 | |
6200 | fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, |
6201 | lockdep_is_held(&graph_lock)); |
6202 | fgd->type = GRAPH_FILTER_NOTRACE; |
6203 | fgd->seq_ops = &ftrace_graph_seq_ops; |
6204 | |
6205 | ret = __ftrace_graph_open(inode, file, fgd); |
6206 | if (ret < 0) |
6207 | kfree(objp: fgd); |
6208 | |
6209 | mutex_unlock(lock: &graph_lock); |
6210 | return ret; |
6211 | } |
6212 | |
6213 | static int |
6214 | ftrace_graph_release(struct inode *inode, struct file *file) |
6215 | { |
6216 | struct ftrace_graph_data *fgd; |
6217 | struct ftrace_hash *old_hash, *new_hash; |
6218 | struct trace_parser *parser; |
6219 | int ret = 0; |
6220 | |
6221 | if (file->f_mode & FMODE_READ) { |
6222 | struct seq_file *m = file->private_data; |
6223 | |
6224 | fgd = m->private; |
6225 | seq_release(inode, file); |
6226 | } else { |
6227 | fgd = file->private_data; |
6228 | } |
6229 | |
6230 | |
6231 | if (file->f_mode & FMODE_WRITE) { |
6232 | |
6233 | parser = &fgd->parser; |
6234 | |
6235 | if (trace_parser_loaded((parser))) { |
6236 | ret = ftrace_graph_set_hash(hash: fgd->new_hash, |
6237 | buffer: parser->buffer); |
6238 | } |
6239 | |
6240 | trace_parser_put(parser); |
6241 | |
6242 | new_hash = __ftrace_hash_move(src: fgd->new_hash); |
6243 | if (!new_hash) { |
6244 | ret = -ENOMEM; |
6245 | goto out; |
6246 | } |
6247 | |
6248 | mutex_lock(&graph_lock); |
6249 | |
6250 | if (fgd->type == GRAPH_FILTER_FUNCTION) { |
6251 | old_hash = rcu_dereference_protected(ftrace_graph_hash, |
6252 | lockdep_is_held(&graph_lock)); |
6253 | rcu_assign_pointer(ftrace_graph_hash, new_hash); |
6254 | } else { |
6255 | old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, |
6256 | lockdep_is_held(&graph_lock)); |
6257 | rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash); |
6258 | } |
6259 | |
6260 | mutex_unlock(lock: &graph_lock); |
6261 | |
6262 | /* |
6263 | * We need to do a hard force of sched synchronization. |
6264 | * This is because we use preempt_disable() to do RCU, but |
6265 | * the function tracers can be called where RCU is not watching |
6266 | * (like before user_exit()). We can not rely on the RCU |
6267 | * infrastructure to do the synchronization, thus we must do it |
6268 | * ourselves. |
6269 | */ |
6270 | if (old_hash != EMPTY_HASH) |
6271 | synchronize_rcu_tasks_rude(); |
6272 | |
6273 | free_ftrace_hash(hash: old_hash); |
6274 | } |
6275 | |
6276 | out: |
6277 | free_ftrace_hash(hash: fgd->new_hash); |
6278 | kfree(objp: fgd); |
6279 | |
6280 | return ret; |
6281 | } |
6282 | |
6283 | static int |
6284 | ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer) |
6285 | { |
6286 | struct ftrace_glob func_g; |
6287 | struct dyn_ftrace *rec; |
6288 | struct ftrace_page *pg; |
6289 | struct ftrace_func_entry *entry; |
6290 | int fail = 1; |
6291 | int not; |
6292 | |
6293 | /* decode regex */ |
6294 | func_g.type = filter_parse_regex(buff: buffer, strlen(buffer), |
6295 | search: &func_g.search, not: ¬); |
6296 | |
6297 | func_g.len = strlen(func_g.search); |
6298 | |
6299 | mutex_lock(&ftrace_lock); |
6300 | |
6301 | if (unlikely(ftrace_disabled)) { |
6302 | mutex_unlock(lock: &ftrace_lock); |
6303 | return -ENODEV; |
6304 | } |
6305 | |
6306 | do_for_each_ftrace_rec(pg, rec) { |
6307 | |
6308 | if (rec->flags & FTRACE_FL_DISABLED) |
6309 | continue; |
6310 | |
6311 | if (ftrace_match_record(rec, func_g: &func_g, NULL, exclude_mod: 0)) { |
6312 | entry = ftrace_lookup_ip(hash, ip: rec->ip); |
6313 | |
6314 | if (!not) { |
6315 | fail = 0; |
6316 | |
6317 | if (entry) |
6318 | continue; |
6319 | if (add_hash_entry(hash, ip: rec->ip) == NULL) |
6320 | goto out; |
6321 | } else { |
6322 | if (entry) { |
6323 | free_hash_entry(hash, entry); |
6324 | fail = 0; |
6325 | } |
6326 | } |
6327 | } |
6328 | } while_for_each_ftrace_rec(); |
6329 | out: |
6330 | mutex_unlock(lock: &ftrace_lock); |
6331 | |
6332 | if (fail) |
6333 | return -EINVAL; |
6334 | |
6335 | return 0; |
6336 | } |
6337 | |
6338 | static ssize_t |
6339 | ftrace_graph_write(struct file *file, const char __user *ubuf, |
6340 | size_t cnt, loff_t *ppos) |
6341 | { |
6342 | ssize_t read, ret = 0; |
6343 | struct ftrace_graph_data *fgd = file->private_data; |
6344 | struct trace_parser *parser; |
6345 | |
6346 | if (!cnt) |
6347 | return 0; |
6348 | |
6349 | /* Read mode uses seq functions */ |
6350 | if (file->f_mode & FMODE_READ) { |
6351 | struct seq_file *m = file->private_data; |
6352 | fgd = m->private; |
6353 | } |
6354 | |
6355 | parser = &fgd->parser; |
6356 | |
6357 | read = trace_get_user(parser, ubuf, cnt, ppos); |
6358 | |
6359 | if (read >= 0 && trace_parser_loaded(parser) && |
6360 | !trace_parser_cont(parser)) { |
6361 | |
6362 | ret = ftrace_graph_set_hash(hash: fgd->new_hash, |
6363 | buffer: parser->buffer); |
6364 | trace_parser_clear(parser); |
6365 | } |
6366 | |
6367 | if (!ret) |
6368 | ret = read; |
6369 | |
6370 | return ret; |
6371 | } |
6372 | |
6373 | static const struct file_operations ftrace_graph_fops = { |
6374 | .open = ftrace_graph_open, |
6375 | .read = seq_read, |
6376 | .write = ftrace_graph_write, |
6377 | .llseek = tracing_lseek, |
6378 | .release = ftrace_graph_release, |
6379 | }; |
6380 | |
6381 | static const struct file_operations ftrace_graph_notrace_fops = { |
6382 | .open = ftrace_graph_notrace_open, |
6383 | .read = seq_read, |
6384 | .write = ftrace_graph_write, |
6385 | .llseek = tracing_lseek, |
6386 | .release = ftrace_graph_release, |
6387 | }; |
6388 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
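/*
 * The file_operations above back the filter control files in tracefs.
 * Typical run-time usage from user space (paths relative to
 * /sys/kernel/tracing):
 *
 *	echo 'kmem_cache_*' > set_ftrace_filter
 *	echo kfree          > set_ftrace_notrace
 *	echo __schedule     > set_graph_function
 */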
6389 | |
6390 | void ftrace_create_filter_files(struct ftrace_ops *ops, |
6391 | struct dentry *parent) |
6392 | { |
6393 | |
6394 | trace_create_file(name: "set_ftrace_filter", TRACE_MODE_WRITE, parent, |
6395 | data: ops, fops: &ftrace_filter_fops); |
6396 | |
6397 | trace_create_file(name: "set_ftrace_notrace", TRACE_MODE_WRITE, parent, |
6398 | data: ops, fops: &ftrace_notrace_fops); |
6399 | } |
6400 | |
6401 | /* |
6402 | * The name "destroy_filter_files" is really a misnomer. Although |
6403 | * in the future, it may actually delete the files, but this is |
6404 | * really intended to make sure the ops passed in are disabled |
6405 | * and that when this function returns, the caller is free to |
6406 | * free the ops. |
6407 | * |
6408 | * The "destroy" name is only to match the "create" name that this |
6409 | * should be paired with. |
6410 | */ |
6411 | void ftrace_destroy_filter_files(struct ftrace_ops *ops) |
6412 | { |
6413 | mutex_lock(&ftrace_lock); |
6414 | if (ops->flags & FTRACE_OPS_FL_ENABLED) |
6415 | ftrace_shutdown(ops, command: 0); |
6416 | ops->flags |= FTRACE_OPS_FL_DELETED; |
6417 | ftrace_free_filter(ops); |
6418 | mutex_unlock(lock: &ftrace_lock); |
6419 | } |
6420 | |
6421 | static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer) |
6422 | { |
6423 | |
6424 | trace_create_file(name: "available_filter_functions", TRACE_MODE_READ, |
6425 | parent: d_tracer, NULL, fops: &ftrace_avail_fops); |
6426 | |
6427 | trace_create_file(name: "available_filter_functions_addrs", TRACE_MODE_READ, |
6428 | parent: d_tracer, NULL, fops: &ftrace_avail_addrs_fops); |
6429 | |
6430 | trace_create_file(name: "enabled_functions", TRACE_MODE_READ, |
6431 | parent: d_tracer, NULL, fops: &ftrace_enabled_fops); |
6432 | |
6433 | trace_create_file(name: "touched_functions", TRACE_MODE_READ, |
6434 | parent: d_tracer, NULL, fops: &ftrace_touched_fops); |
6435 | |
6436 | ftrace_create_filter_files(ops: &global_ops, parent: d_tracer); |
6437 | |
6438 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
6439 | trace_create_file(name: "set_graph_function", TRACE_MODE_WRITE, parent: d_tracer, |
6440 | NULL, |
6441 | fops: &ftrace_graph_fops); |
6442 | trace_create_file(name: "set_graph_notrace", TRACE_MODE_WRITE, parent: d_tracer, |
6443 | NULL, |
6444 | fops: &ftrace_graph_notrace_fops); |
6445 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
6446 | |
6447 | return 0; |
6448 | } |
6449 | |
6450 | static int ftrace_cmp_ips(const void *a, const void *b) |
6451 | { |
6452 | const unsigned long *ipa = a; |
6453 | const unsigned long *ipb = b; |
6454 | |
6455 | if (*ipa > *ipb) |
6456 | return 1; |
6457 | if (*ipa < *ipb) |
6458 | return -1; |
6459 | return 0; |
6460 | } |
6461 | |
6462 | #ifdef CONFIG_FTRACE_SORT_STARTUP_TEST |
6463 | static void test_is_sorted(unsigned long *start, unsigned long count) |
6464 | { |
6465 | int i; |
6466 | |
6467 | for (i = 1; i < count; i++) { |
6468 | if (WARN(start[i - 1] > start[i], |
6469 | "[%d] %pS at %lx is not sorted with %pS at %lx\n", i, |
6470 | (void *)start[i - 1], start[i - 1], |
6471 | (void *)start[i], start[i])) |
6472 | break; |
6473 | } |
6474 | if (i == count) |
6475 | pr_info("ftrace section at %px sorted properly\n", start); |
6476 | } |
6477 | #else |
6478 | static void test_is_sorted(unsigned long *start, unsigned long count) |
6479 | { |
6480 | } |
6481 | #endif |
6482 | |
6483 | static int ftrace_process_locs(struct module *mod, |
6484 | unsigned long *start, |
6485 | unsigned long *end) |
6486 | { |
6487 | struct ftrace_page *pg_unuse = NULL; |
6488 | struct ftrace_page *start_pg; |
6489 | struct ftrace_page *pg; |
6490 | struct dyn_ftrace *rec; |
6491 | unsigned long skipped = 0; |
6492 | unsigned long count; |
6493 | unsigned long *p; |
6494 | unsigned long addr; |
6495 | unsigned long flags = 0; /* Shut up gcc */ |
6496 | int ret = -ENOMEM; |
6497 | |
6498 | count = end - start; |
6499 | |
6500 | if (!count) |
6501 | return 0; |
6502 | |
6503 | /* |
6504 | 	 * Sorting mcount entries in vmlinux at build time depends on |
6505 | 	 * CONFIG_BUILDTIME_MCOUNT_SORT, while the mcount locations in |
6506 | 	 * modules cannot be sorted at build time. |
6507 | */ |
6508 | if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) { |
6509 | sort(base: start, num: count, size: sizeof(*start), |
6510 | cmp_func: ftrace_cmp_ips, NULL); |
6511 | } else { |
6512 | test_is_sorted(start, count); |
6513 | } |
6514 | |
6515 | start_pg = ftrace_allocate_pages(num_to_init: count); |
6516 | if (!start_pg) |
6517 | return -ENOMEM; |
6518 | |
6519 | mutex_lock(&ftrace_lock); |
6520 | |
6521 | /* |
6522 | 	 * The core kernel and each module need their own pages, as |
6523 | * modules will free them when they are removed. |
6524 | * Force a new page to be allocated for modules. |
6525 | */ |
6526 | if (!mod) { |
6527 | WARN_ON(ftrace_pages || ftrace_pages_start); |
6528 | /* First initialization */ |
6529 | ftrace_pages = ftrace_pages_start = start_pg; |
6530 | } else { |
6531 | if (!ftrace_pages) |
6532 | goto out; |
6533 | |
6534 | if (WARN_ON(ftrace_pages->next)) { |
6535 | /* Hmm, we have free pages? */ |
6536 | while (ftrace_pages->next) |
6537 | ftrace_pages = ftrace_pages->next; |
6538 | } |
6539 | |
6540 | ftrace_pages->next = start_pg; |
6541 | } |
6542 | |
6543 | p = start; |
6544 | pg = start_pg; |
6545 | while (p < end) { |
6546 | unsigned long end_offset; |
6547 | addr = ftrace_call_adjust(addr: *p++); |
6548 | /* |
6549 | * Some architecture linkers will pad between |
6550 | * the different mcount_loc sections of different |
6551 | * object files to satisfy alignments. |
6552 | * Skip any NULL pointers. |
6553 | */ |
6554 | if (!addr) { |
6555 | skipped++; |
6556 | continue; |
6557 | } |
6558 | |
6559 | end_offset = (pg->index+1) * sizeof(pg->records[0]); |
6560 | if (end_offset > PAGE_SIZE << pg->order) { |
6561 | /* We should have allocated enough */ |
6562 | if (WARN_ON(!pg->next)) |
6563 | break; |
6564 | pg = pg->next; |
6565 | } |
6566 | |
6567 | rec = &pg->records[pg->index++]; |
6568 | rec->ip = addr; |
6569 | } |
6570 | |
6571 | if (pg->next) { |
6572 | pg_unuse = pg->next; |
6573 | pg->next = NULL; |
6574 | } |
6575 | |
6576 | /* Assign the last page to ftrace_pages */ |
6577 | ftrace_pages = pg; |
6578 | |
6579 | /* |
6580 | * We only need to disable interrupts on start up |
6581 | * because we are modifying code that an interrupt |
6582 | * may execute, and the modification is not atomic. |
6583 | * But for modules, nothing runs the code we modify |
6584 | * until we are finished with it, and there's no |
6585 | * reason to cause large interrupt latencies while we do it. |
6586 | */ |
6587 | if (!mod) |
6588 | local_irq_save(flags); |
6589 | ftrace_update_code(mod, new_pgs: start_pg); |
6590 | if (!mod) |
6591 | local_irq_restore(flags); |
6592 | ret = 0; |
6593 | out: |
6594 | mutex_unlock(lock: &ftrace_lock); |
6595 | |
6596 | /* We should have used all pages unless we skipped some */ |
6597 | if (pg_unuse) { |
6598 | WARN_ON(!skipped); |
6599 | ftrace_free_pages(pages: pg_unuse); |
6600 | } |
6601 | return ret; |
6602 | } |
6603 | |
6604 | struct ftrace_mod_func { |
6605 | struct list_head list; |
6606 | char *name; |
6607 | unsigned long ip; |
6608 | unsigned int size; |
6609 | }; |
6610 | |
6611 | struct ftrace_mod_map { |
6612 | struct rcu_head rcu; |
6613 | struct list_head list; |
6614 | struct module *mod; |
6615 | unsigned long start_addr; |
6616 | unsigned long end_addr; |
6617 | struct list_head funcs; |
6618 | unsigned int num_funcs; |
6619 | }; |
6620 | |
6621 | static int ftrace_get_trampoline_kallsym(unsigned int symnum, |
6622 | unsigned long *value, char *type, |
6623 | char *name, char *module_name, |
6624 | int *exported) |
6625 | { |
6626 | struct ftrace_ops *op; |
6627 | |
6628 | list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) { |
6629 | if (!op->trampoline || symnum--) |
6630 | continue; |
6631 | *value = op->trampoline; |
6632 | *type = 't'; |
6633 | strscpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN); |
6634 | strscpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN); |
6635 | *exported = 0; |
6636 | return 0; |
6637 | } |
6638 | |
6639 | return -ERANGE; |
6640 | } |
6641 | |
6642 | #if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES) |
6643 | /* |
6644 | * Check if the current ops references the given ip. |
6645 | * |
6646 | * If the ops traces all functions, then it was already accounted for. |
6647 | * If the ops does not trace the current record function, skip it. |
6648 | * If the ops ignores the function via notrace filter, skip it. |
6649 | */ |
6650 | static bool |
6651 | ops_references_ip(struct ftrace_ops *ops, unsigned long ip) |
6652 | { |
6653 | /* If ops isn't enabled, ignore it */ |
6654 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) |
6655 | return false; |
6656 | |
6657 | /* If ops traces all then it includes this function */ |
6658 | if (ops_traces_mod(ops)) |
6659 | return true; |
6660 | |
6661 | /* The function must be in the filter */ |
6662 | if (!ftrace_hash_empty(hash: ops->func_hash->filter_hash) && |
6663 | !__ftrace_lookup_ip(hash: ops->func_hash->filter_hash, ip)) |
6664 | return false; |
6665 | |
6666 | /* If in notrace hash, we ignore it too */ |
6667 | if (ftrace_lookup_ip(hash: ops->func_hash->notrace_hash, ip)) |
6668 | return false; |
6669 | |
6670 | return true; |
6671 | } |
6672 | #endif |
6673 | |
6674 | #ifdef CONFIG_MODULES |
6675 | |
6676 | #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) |
6677 | |
6678 | static LIST_HEAD(ftrace_mod_maps); |
6679 | |
6680 | static int referenced_filters(struct dyn_ftrace *rec) |
6681 | { |
6682 | struct ftrace_ops *ops; |
6683 | int cnt = 0; |
6684 | |
6685 | for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { |
6686 | if (ops_references_ip(ops, ip: rec->ip)) { |
6687 | if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT)) |
6688 | continue; |
6689 | if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY)) |
6690 | continue; |
6691 | cnt++; |
6692 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) |
6693 | rec->flags |= FTRACE_FL_REGS; |
6694 | if (cnt == 1 && ops->trampoline) |
6695 | rec->flags |= FTRACE_FL_TRAMP; |
6696 | else |
6697 | rec->flags &= ~FTRACE_FL_TRAMP; |
6698 | } |
6699 | } |
6700 | |
6701 | return cnt; |
6702 | } |
6703 | |
6704 | static void |
6705 | clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash) |
6706 | { |
6707 | struct ftrace_func_entry *entry; |
6708 | struct dyn_ftrace *rec; |
6709 | int i; |
6710 | |
6711 | if (ftrace_hash_empty(hash)) |
6712 | return; |
6713 | |
6714 | for (i = 0; i < pg->index; i++) { |
6715 | rec = &pg->records[i]; |
6716 | entry = __ftrace_lookup_ip(hash, ip: rec->ip); |
6717 | /* |
6718 | * Do not allow this rec to match again. |
6719 | * Yeah, it may waste some memory, but will be removed |
6720 | * if/when the hash is modified again. |
6721 | */ |
6722 | if (entry) |
6723 | entry->ip = 0; |
6724 | } |
6725 | } |
6726 | |
6727 | /* Clear any records from hashes */ |
6728 | static void clear_mod_from_hashes(struct ftrace_page *pg) |
6729 | { |
6730 | struct trace_array *tr; |
6731 | |
6732 | mutex_lock(&trace_types_lock); |
6733 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
6734 | if (!tr->ops || !tr->ops->func_hash) |
6735 | continue; |
6736 | mutex_lock(&tr->ops->func_hash->regex_lock); |
6737 | clear_mod_from_hash(pg, hash: tr->ops->func_hash->filter_hash); |
6738 | clear_mod_from_hash(pg, hash: tr->ops->func_hash->notrace_hash); |
6739 | mutex_unlock(lock: &tr->ops->func_hash->regex_lock); |
6740 | } |
6741 | mutex_unlock(lock: &trace_types_lock); |
6742 | } |
6743 | |
6744 | static void ftrace_free_mod_map(struct rcu_head *rcu) |
6745 | { |
6746 | struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu); |
6747 | struct ftrace_mod_func *mod_func; |
6748 | struct ftrace_mod_func *n; |
6749 | |
6750 | /* All the contents of mod_map are now not visible to readers */ |
6751 | list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) { |
6752 | kfree(objp: mod_func->name); |
6753 | list_del(entry: &mod_func->list); |
6754 | kfree(objp: mod_func); |
6755 | } |
6756 | |
6757 | kfree(objp: mod_map); |
6758 | } |
6759 | |
6760 | void ftrace_release_mod(struct module *mod) |
6761 | { |
6762 | struct ftrace_mod_map *mod_map; |
6763 | struct ftrace_mod_map *n; |
6764 | struct dyn_ftrace *rec; |
6765 | struct ftrace_page **last_pg; |
6766 | struct ftrace_page *tmp_page = NULL; |
6767 | struct ftrace_page *pg; |
6768 | |
6769 | mutex_lock(&ftrace_lock); |
6770 | |
6771 | if (ftrace_disabled) |
6772 | goto out_unlock; |
6773 | |
6774 | list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { |
6775 | if (mod_map->mod == mod) { |
6776 | list_del_rcu(entry: &mod_map->list); |
6777 | call_rcu(head: &mod_map->rcu, func: ftrace_free_mod_map); |
6778 | break; |
6779 | } |
6780 | } |
6781 | |
6782 | /* |
6783 | * Each module has its own ftrace_pages, remove |
6784 | * them from the list. |
6785 | */ |
6786 | last_pg = &ftrace_pages_start; |
6787 | for (pg = ftrace_pages_start; pg; pg = *last_pg) { |
6788 | rec = &pg->records[0]; |
6789 | if (within_module(addr: rec->ip, mod)) { |
6790 | /* |
6791 | * As core pages are first, the first |
6792 | * page should never be a module page. |
6793 | */ |
6794 | if (WARN_ON(pg == ftrace_pages_start)) |
6795 | goto out_unlock; |
6796 | |
6797 | /* Check if we are deleting the last page */ |
6798 | if (pg == ftrace_pages) |
6799 | ftrace_pages = next_to_ftrace_page(last_pg); |
6800 | |
6801 | ftrace_update_tot_cnt -= pg->index; |
6802 | *last_pg = pg->next; |
6803 | |
6804 | pg->next = tmp_page; |
6805 | tmp_page = pg; |
6806 | } else |
6807 | last_pg = &pg->next; |
6808 | } |
6809 | out_unlock: |
6810 | mutex_unlock(lock: &ftrace_lock); |
6811 | |
6812 | for (pg = tmp_page; pg; pg = tmp_page) { |
6813 | |
6814 | /* Needs to be called outside of ftrace_lock */ |
6815 | clear_mod_from_hashes(pg); |
6816 | |
6817 | if (pg->records) { |
6818 | free_pages(addr: (unsigned long)pg->records, order: pg->order); |
6819 | ftrace_number_of_pages -= 1 << pg->order; |
6820 | } |
6821 | tmp_page = pg->next; |
6822 | kfree(objp: pg); |
6823 | ftrace_number_of_groups--; |
6824 | } |
6825 | } |
6826 | |
6827 | void ftrace_module_enable(struct module *mod) |
6828 | { |
6829 | struct dyn_ftrace *rec; |
6830 | struct ftrace_page *pg; |
6831 | |
6832 | mutex_lock(&ftrace_lock); |
6833 | |
6834 | if (ftrace_disabled) |
6835 | goto out_unlock; |
6836 | |
6837 | /* |
6838 | * If the tracing is enabled, go ahead and enable the record. |
6839 | * |
6840 | * The reason not to enable the record immediately is the |
6841 | * inherent check of ftrace_make_nop/ftrace_make_call for |
6842 | 	 * correct previous instructions. Doing the NOP |
6843 | 	 * conversion first puts the module into the correct state, thus |
6844 | 	 * passing the ftrace_make_call check. |
6845 | * |
6846 | * We also delay this to after the module code already set the |
6847 | * text to read-only, as we now need to set it back to read-write |
6848 | * so that we can modify the text. |
6849 | */ |
6850 | if (ftrace_start_up) |
6851 | ftrace_arch_code_modify_prepare(); |
6852 | |
6853 | do_for_each_ftrace_rec(pg, rec) { |
6854 | int cnt; |
6855 | /* |
6856 | * do_for_each_ftrace_rec() is a double loop. |
6857 | 		 * Module text shares the pg. If a record is |
6858 | 		 * not part of this module, then skip this pg, |
6859 | 		 * which is what the "break" will do. |
6860 | */ |
6861 | if (!within_module(addr: rec->ip, mod)) |
6862 | break; |
6863 | |
6864 | /* Weak functions should still be ignored */ |
6865 | if (!test_for_valid_rec(rec)) { |
6866 | /* Clear all other flags. Should not be enabled anyway */ |
6867 | rec->flags = FTRACE_FL_DISABLED; |
6868 | continue; |
6869 | } |
6870 | |
6871 | cnt = 0; |
6872 | |
6873 | /* |
6874 | * When adding a module, we need to check if tracers are |
6875 | * currently enabled and if they are, and can trace this record, |
6876 | * we need to enable the module functions as well as update the |
6877 | * reference counts for those function records. |
6878 | */ |
6879 | if (ftrace_start_up) |
6880 | cnt += referenced_filters(rec); |
6881 | |
6882 | rec->flags &= ~FTRACE_FL_DISABLED; |
6883 | rec->flags += cnt; |
6884 | |
6885 | if (ftrace_start_up && cnt) { |
6886 | int failed = __ftrace_replace_code(rec, enable: 1); |
6887 | if (failed) { |
6888 | ftrace_bug(failed, rec); |
6889 | goto out_loop; |
6890 | } |
6891 | } |
6892 | |
6893 | } while_for_each_ftrace_rec(); |
6894 | |
6895 | out_loop: |
6896 | if (ftrace_start_up) |
6897 | ftrace_arch_code_modify_post_process(); |
6898 | |
6899 | out_unlock: |
6900 | mutex_unlock(lock: &ftrace_lock); |
6901 | |
6902 | process_cached_mods(mod_name: mod->name); |
6903 | } |
6904 | |
6905 | void ftrace_module_init(struct module *mod) |
6906 | { |
6907 | int ret; |
6908 | |
6909 | if (ftrace_disabled || !mod->num_ftrace_callsites) |
6910 | return; |
6911 | |
6912 | ret = ftrace_process_locs(mod, start: mod->ftrace_callsites, |
6913 | end: mod->ftrace_callsites + mod->num_ftrace_callsites); |
6914 | if (ret) |
6915 | pr_warn("ftrace: failed to allocate entries for module '%s' functions\n", |
6916 | mod->name); |
6917 | } |
6918 | |
6919 | static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, |
6920 | struct dyn_ftrace *rec) |
6921 | { |
6922 | struct ftrace_mod_func *mod_func; |
6923 | unsigned long symsize; |
6924 | unsigned long offset; |
6925 | char str[KSYM_SYMBOL_LEN]; |
6926 | char *modname; |
6927 | const char *ret; |
6928 | |
6929 | ret = kallsyms_lookup(addr: rec->ip, symbolsize: &symsize, offset: &offset, modname: &modname, namebuf: str); |
6930 | if (!ret) |
6931 | return; |
6932 | |
6933 | mod_func = kmalloc(size: sizeof(*mod_func), GFP_KERNEL); |
6934 | if (!mod_func) |
6935 | return; |
6936 | |
6937 | mod_func->name = kstrdup(s: str, GFP_KERNEL); |
6938 | if (!mod_func->name) { |
6939 | kfree(objp: mod_func); |
6940 | return; |
6941 | } |
6942 | |
6943 | mod_func->ip = rec->ip - offset; |
6944 | mod_func->size = symsize; |
6945 | |
6946 | mod_map->num_funcs++; |
6947 | |
6948 | list_add_rcu(new: &mod_func->list, head: &mod_map->funcs); |
6949 | } |
6950 | |
6951 | static struct ftrace_mod_map * |
6952 | allocate_ftrace_mod_map(struct module *mod, |
6953 | unsigned long start, unsigned long end) |
6954 | { |
6955 | struct ftrace_mod_map *mod_map; |
6956 | |
6957 | mod_map = kmalloc(size: sizeof(*mod_map), GFP_KERNEL); |
6958 | if (!mod_map) |
6959 | return NULL; |
6960 | |
6961 | mod_map->mod = mod; |
6962 | mod_map->start_addr = start; |
6963 | mod_map->end_addr = end; |
6964 | mod_map->num_funcs = 0; |
6965 | |
6966 | INIT_LIST_HEAD_RCU(list: &mod_map->funcs); |
6967 | |
6968 | list_add_rcu(new: &mod_map->list, head: &ftrace_mod_maps); |
6969 | |
6970 | return mod_map; |
6971 | } |
6972 | |
6973 | static const char * |
6974 | ftrace_func_address_lookup(struct ftrace_mod_map *mod_map, |
6975 | unsigned long addr, unsigned long *size, |
6976 | unsigned long *off, char *sym) |
6977 | { |
6978 | struct ftrace_mod_func *found_func = NULL; |
6979 | struct ftrace_mod_func *mod_func; |
6980 | |
6981 | list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { |
6982 | if (addr >= mod_func->ip && |
6983 | addr < mod_func->ip + mod_func->size) { |
6984 | found_func = mod_func; |
6985 | break; |
6986 | } |
6987 | } |
6988 | |
6989 | if (found_func) { |
6990 | if (size) |
6991 | *size = found_func->size; |
6992 | if (off) |
6993 | *off = addr - found_func->ip; |
6994 | if (sym) |
6995 | strscpy(sym, found_func->name, KSYM_NAME_LEN); |
6996 | |
6997 | return found_func->name; |
6998 | } |
6999 | |
7000 | return NULL; |
7001 | } |
7002 | |
7003 | const char * |
7004 | ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, |
7005 | unsigned long *off, char **modname, char *sym) |
7006 | { |
7007 | struct ftrace_mod_map *mod_map; |
7008 | const char *ret = NULL; |
7009 | |
7010 | /* mod_map is freed via call_rcu() */ |
7011 | preempt_disable(); |
7012 | list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { |
7013 | ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); |
7014 | if (ret) { |
7015 | if (modname) |
7016 | *modname = mod_map->mod->name; |
7017 | break; |
7018 | } |
7019 | } |
7020 | preempt_enable(); |
7021 | |
7022 | return ret; |
7023 | } |
7024 | |
7025 | int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, |
7026 | char *type, char *name, |
7027 | char *module_name, int *exported) |
7028 | { |
7029 | struct ftrace_mod_map *mod_map; |
7030 | struct ftrace_mod_func *mod_func; |
7031 | int ret; |
7032 | |
7033 | preempt_disable(); |
7034 | list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { |
7035 | |
7036 | if (symnum >= mod_map->num_funcs) { |
7037 | symnum -= mod_map->num_funcs; |
7038 | continue; |
7039 | } |
7040 | |
7041 | list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { |
7042 | if (symnum > 1) { |
7043 | symnum--; |
7044 | continue; |
7045 | } |
7046 | |
7047 | *value = mod_func->ip; |
7048 | *type = 'T'; |
7049 | strscpy(name, mod_func->name, KSYM_NAME_LEN); |
7050 | strscpy(module_name, mod_map->mod->name, MODULE_NAME_LEN); |
7051 | *exported = 1; |
7052 | preempt_enable(); |
7053 | return 0; |
7054 | } |
7055 | WARN_ON(1); |
7056 | break; |
7057 | } |
7058 | ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, |
7059 | module_name, exported); |
7060 | preempt_enable(); |
7061 | return ret; |
7062 | } |
7063 | |
7064 | #else |
7065 | static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, |
7066 | struct dyn_ftrace *rec) { } |
7067 | static inline struct ftrace_mod_map * |
7068 | allocate_ftrace_mod_map(struct module *mod, |
7069 | unsigned long start, unsigned long end) |
7070 | { |
7071 | return NULL; |
7072 | } |
7073 | int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, |
7074 | char *type, char *name, char *module_name, |
7075 | int *exported) |
7076 | { |
7077 | int ret; |
7078 | |
7079 | preempt_disable(); |
7080 | ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, |
7081 | module_name, exported); |
7082 | preempt_enable(); |
7083 | return ret; |
7084 | } |
7085 | #endif /* CONFIG_MODULES */ |
7086 | |
7087 | struct ftrace_init_func { |
7088 | struct list_head list; |
7089 | unsigned long ip; |
7090 | }; |
7091 | |
7092 | /* Clear any init ips from hashes */ |
7093 | static void |
7094 | clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash) |
7095 | { |
7096 | struct ftrace_func_entry *entry; |
7097 | |
7098 | entry = ftrace_lookup_ip(hash, ip: func->ip); |
7099 | /* |
7100 | * Do not allow this rec to match again. |
7101 | * Yeah, it may waste some memory, but will be removed |
7102 | * if/when the hash is modified again. |
7103 | */ |
7104 | if (entry) |
7105 | entry->ip = 0; |
7106 | } |
7107 | |
7108 | static void |
7109 | clear_func_from_hashes(struct ftrace_init_func *func) |
7110 | { |
7111 | struct trace_array *tr; |
7112 | |
7113 | mutex_lock(&trace_types_lock); |
7114 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
7115 | if (!tr->ops || !tr->ops->func_hash) |
7116 | continue; |
7117 | mutex_lock(&tr->ops->func_hash->regex_lock); |
7118 | clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
7119 | clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
7120 | mutex_unlock(&tr->ops->func_hash->regex_lock);
7121 | }
7122 | mutex_unlock(&trace_types_lock);
7123 | } |
7124 | |
7125 | static void add_to_clear_hash_list(struct list_head *clear_list, |
7126 | struct dyn_ftrace *rec) |
7127 | { |
7128 | struct ftrace_init_func *func; |
7129 | |
7130 | func = kmalloc(sizeof(*func), GFP_KERNEL);
7131 | if (!func) { |
7132 | MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n"); |
7133 | return; |
7134 | } |
7135 | |
7136 | func->ip = rec->ip; |
7137 | list_add(&func->list, clear_list);
7138 | } |
7139 | |
7140 | void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr) |
7141 | { |
7142 | unsigned long start = (unsigned long)(start_ptr); |
7143 | unsigned long end = (unsigned long)(end_ptr); |
7144 | struct ftrace_page **last_pg = &ftrace_pages_start; |
7145 | struct ftrace_page *pg; |
7146 | struct dyn_ftrace *rec; |
7147 | struct dyn_ftrace key; |
7148 | struct ftrace_mod_map *mod_map = NULL; |
7149 | struct ftrace_init_func *func, *func_next; |
7150 | LIST_HEAD(clear_hash); |
7151 | |
7152 | key.ip = start; |
7153 | key.flags = end; /* overload flags, as it is unsigned long */ |
7154 | |
7155 | mutex_lock(&ftrace_lock); |
7156 | |
7157 | /* |
7158 | * If we are freeing module init memory, then check if |
7159 | * any tracer is active. If so, we need to save a mapping of |
7160 | * the module functions being freed with the address. |
7161 | */ |
7162 | if (mod && ftrace_ops_list != &ftrace_list_end) |
7163 | mod_map = allocate_ftrace_mod_map(mod, start, end); |
7164 | |
7165 | for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { |
7166 | if (end < pg->records[0].ip || |
7167 | start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) |
7168 | continue; |
7169 | again: |
7170 | rec = bsearch(&key, pg->records, pg->index,
7171 | sizeof(struct dyn_ftrace),
7172 | ftrace_cmp_recs);
7173 | if (!rec) |
7174 | continue; |
7175 | |
7176 | /* rec will be cleared from hashes after ftrace_lock unlock */ |
7177 | add_to_clear_hash_list(&clear_hash, rec);
7178 | |
7179 | if (mod_map) |
7180 | save_ftrace_mod_rec(mod_map, rec); |
7181 | |
7182 | pg->index--; |
7183 | ftrace_update_tot_cnt--; |
7184 | if (!pg->index) { |
7185 | *last_pg = pg->next; |
7186 | if (pg->records) { |
7187 | free_pages((unsigned long)pg->records, pg->order);
7188 | ftrace_number_of_pages -= 1 << pg->order; |
7189 | } |
7190 | ftrace_number_of_groups--; |
7191 | kfree(pg);
7192 | pg = container_of(last_pg, struct ftrace_page, next); |
7193 | if (!(*last_pg)) |
7194 | ftrace_pages = pg; |
7195 | continue; |
7196 | } |
7197 | memmove(rec, rec + 1, |
7198 | (pg->index - (rec - pg->records)) * sizeof(*rec)); |
7199 | /* More than one function may be in this block */ |
7200 | goto again; |
7201 | } |
7202 | mutex_unlock(&ftrace_lock);
7203 | |
7204 | list_for_each_entry_safe(func, func_next, &clear_hash, list) { |
7205 | clear_func_from_hashes(func); |
7206 | kfree(func);
7207 | } |
7208 | } |
7209 | |
7210 | void __init ftrace_free_init_mem(void) |
7211 | { |
7212 | void *start = (void *)(&__init_begin); |
7213 | void *end = (void *)(&__init_end); |
7214 | |
7215 | ftrace_boot_snapshot(); |
7216 | |
7217 | ftrace_free_mem(NULL, start, end);
7218 | } |
7219 | |
7220 | int __init __weak ftrace_dyn_arch_init(void) |
7221 | { |
7222 | return 0; |
7223 | } |
7224 | |
7225 | void __init ftrace_init(void) |
7226 | { |
7227 | extern unsigned long __start_mcount_loc[]; |
7228 | extern unsigned long __stop_mcount_loc[]; |
7229 | unsigned long count, flags; |
7230 | int ret; |
7231 | |
7232 | local_irq_save(flags); |
7233 | ret = ftrace_dyn_arch_init(); |
7234 | local_irq_restore(flags); |
7235 | if (ret) |
7236 | goto failed; |
7237 | |
7238 | count = __stop_mcount_loc - __start_mcount_loc; |
7239 | if (!count) { |
7240 | pr_info("ftrace: No functions to be traced?\n"); |
7241 | goto failed; |
7242 | } |
7243 | |
7244 | pr_info("ftrace: allocating %ld entries in %ld pages\n", |
7245 | count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); |
7246 | |
7247 | ret = ftrace_process_locs(NULL, |
7248 | __start_mcount_loc,
7249 | __stop_mcount_loc);
7250 | if (ret) { |
7251 | pr_warn("ftrace: failed to allocate entries for functions\n"); |
7252 | goto failed; |
7253 | } |
7254 | |
7255 | pr_info("ftrace: allocated %ld pages with %ld groups\n", |
7256 | ftrace_number_of_pages, ftrace_number_of_groups); |
7257 | |
7258 | last_ftrace_enabled = ftrace_enabled = 1; |
7259 | |
7260 | set_ftrace_early_filters(); |
7261 | |
7262 | return; |
7263 | failed: |
7264 | ftrace_disabled = 1; |
7265 | } |
7266 | |
7267 | /* Do nothing if arch does not support this */ |
7268 | void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) |
7269 | { |
7270 | } |
7271 | |
7272 | static void ftrace_update_trampoline(struct ftrace_ops *ops) |
7273 | { |
7274 | unsigned long trampoline = ops->trampoline; |
7275 | |
7276 | arch_ftrace_update_trampoline(ops); |
7277 | if (ops->trampoline && ops->trampoline != trampoline && |
7278 | (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) { |
7279 | /* Add to kallsyms before the perf events */ |
7280 | ftrace_add_trampoline_to_kallsyms(ops); |
7281 | perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
7282 | ops->trampoline, ops->trampoline_size, false,
7283 | FTRACE_TRAMPOLINE_SYM); |
7284 | /* |
7285 | * Record the perf text poke event after the ksymbol register |
7286 | * event. |
7287 | */ |
7288 | perf_event_text_poke((void *)ops->trampoline, NULL, 0,
7289 | (void *)ops->trampoline,
7290 | ops->trampoline_size);
7291 | } |
7292 | } |
7293 | |
7294 | void ftrace_init_trace_array(struct trace_array *tr) |
7295 | { |
7296 | INIT_LIST_HEAD(&tr->func_probes);
7297 | INIT_LIST_HEAD(&tr->mod_trace);
7298 | INIT_LIST_HEAD(&tr->mod_notrace);
7299 | } |
7300 | #else |
7301 | |
7302 | struct ftrace_ops global_ops = { |
7303 | .func = ftrace_stub, |
7304 | .flags = FTRACE_OPS_FL_INITIALIZED | |
7305 | FTRACE_OPS_FL_PID, |
7306 | }; |
7307 | |
7308 | static int __init ftrace_nodyn_init(void) |
7309 | { |
7310 | ftrace_enabled = 1; |
7311 | return 0; |
7312 | } |
7313 | core_initcall(ftrace_nodyn_init); |
7314 | |
7315 | static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; } |
7316 | static inline void ftrace_startup_all(int command) { } |
7317 | |
7318 | static void ftrace_update_trampoline(struct ftrace_ops *ops) |
7319 | { |
7320 | } |
7321 | |
7322 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
7323 | |
7324 | __init void ftrace_init_global_array_ops(struct trace_array *tr) |
7325 | { |
7326 | tr->ops = &global_ops; |
7327 | tr->ops->private = tr; |
7328 | ftrace_init_trace_array(tr); |
7329 | } |
7330 | |
7331 | void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) |
7332 | { |
7333 | /* If we filter on pids, update to use the pid function */ |
7334 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { |
7335 | if (WARN_ON(tr->ops->func != ftrace_stub)) |
7336 | printk("ftrace ops had %pS for function\n", |
7337 | tr->ops->func); |
7338 | } |
7339 | tr->ops->func = func; |
7340 | tr->ops->private = tr; |
7341 | } |
7342 | |
7343 | void ftrace_reset_array_ops(struct trace_array *tr) |
7344 | { |
7345 | tr->ops->func = ftrace_stub; |
7346 | } |
7347 | |
7348 | static nokprobe_inline void |
7349 | __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
7350 | struct ftrace_ops *ignored, struct ftrace_regs *fregs) |
7351 | { |
7352 | struct pt_regs *regs = ftrace_get_regs(fregs); |
7353 | struct ftrace_ops *op; |
7354 | int bit; |
7355 | |
7356 | /* |
7357 | * The ftrace_test_and_set_recursion() will disable preemption, |
7358 | * which is required since some of the ops may be dynamically |
7359 | * allocated; they must be freed only after a synchronize_rcu().
7360 | */
7361 | bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7362 | if (bit < 0) |
7363 | return; |
7364 | |
7365 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
7366 | /* Stub functions don't need to be called nor tested */ |
7367 | if (op->flags & FTRACE_OPS_FL_STUB) |
7368 | continue; |
7369 | /* |
7370 | * Check the following for each ops before calling their func: |
7371 | * if RCU flag is set, then rcu_is_watching() must be true |
7372 | * Otherwise test if the ip matches the ops filter |
7373 | * |
7374 | * If any of the above fails then the op->func() is not executed. |
7375 | */ |
7376 | if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && |
7377 | ftrace_ops_test(op, ip, regs)) {
7378 | if (FTRACE_WARN_ON(!op->func)) { |
7379 | pr_warn("op=%p %pS\n", op, op); |
7380 | goto out; |
7381 | } |
7382 | op->func(ip, parent_ip, op, fregs); |
7383 | } |
7384 | } while_for_each_ftrace_op(op); |
7385 | out: |
7386 | trace_clear_recursion(bit); |
7387 | } |
7388 | |
7389 | /* |
7390 | * Some archs only support passing ip and parent_ip. Even though |
7391 | * the list function ignores the op parameter, we do not want any |
7392 | * C side effects, where a function is called without the caller |
7393 | * sending a third parameter. |
7394 | * Archs are to support both the regs and ftrace_ops at the same time. |
7395 | * If they support ftrace_ops, it is assumed they support regs. |
7396 | * If call backs want to use regs, they must either check for regs |
7397 | * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. |
7398 | * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. |
7399 | * An architecture can pass partial regs with ftrace_ops and still |
7400 | * set the ARCH_SUPPORTS_FTRACE_OPS. |
7401 | * |
7402 | * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be |
7403 | * arch_ftrace_ops_list_func. |
7404 | */ |
7405 | #if ARCH_SUPPORTS_FTRACE_OPS |
7406 | void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
7407 | struct ftrace_ops *op, struct ftrace_regs *fregs) |
7408 | { |
7409 | __ftrace_ops_list_func(ip, parent_ip, NULL, fregs); |
7410 | } |
7411 | #else |
7412 | void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) |
7413 | { |
7414 | __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); |
7415 | } |
7416 | #endif |
7417 | NOKPROBE_SYMBOL(arch_ftrace_ops_list_func); |
7418 | |
7419 | /* |
7420 | * If there's only one function registered but it does not support |
7421 | * recursion or needs RCU protection, then this function will be called
7422 | * by the mcount trampoline. |
7423 | */ |
7424 | static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, |
7425 | struct ftrace_ops *op, struct ftrace_regs *fregs) |
7426 | { |
7427 | int bit; |
7428 | |
7429 | bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7430 | if (bit < 0) |
7431 | return; |
7432 | |
7433 | if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) |
7434 | op->func(ip, parent_ip, op, fregs); |
7435 | |
7436 | trace_clear_recursion(bit); |
7437 | } |
7438 | NOKPROBE_SYMBOL(ftrace_ops_assist_func); |
7439 | |
7440 | /** |
7441 | * ftrace_ops_get_func - get the function a trampoline should call |
7442 | * @ops: the ops to get the function for |
7443 | * |
7444 | * Normally the mcount trampoline will call the ops->func, but there |
7445 | * are times that it should not. For example, if the ops does not |
7446 | * have its own recursion protection, then it should call the |
7447 | * ftrace_ops_assist_func() instead. |
7448 | * |
7449 | * Returns: the function that the trampoline should call for @ops. |
7450 | */ |
7451 | ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops) |
7452 | { |
7453 | /* |
7454 | * If the function does not handle recursion or needs to be RCU safe, |
7455 | * then we need to call the assist handler. |
7456 | */ |
7457 | if (ops->flags & (FTRACE_OPS_FL_RECURSION | |
7458 | FTRACE_OPS_FL_RCU)) |
7459 | return ftrace_ops_assist_func; |
7460 | |
7461 | return ops->func; |
7462 | } |
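
To illustrate the distinction above: an ops registered without FTRACE_OPS_FL_RECURSION is called directly instead of through ftrace_ops_assist_func(), so its callback has to guard itself. A minimal sketch, assuming the recursion helpers from <linux/trace_recursion.h>; my_callback and my_traced_count are made-up names and not part of ftrace.c:

#include <linux/ftrace.h>
#include <linux/trace_recursion.h>

static unsigned long my_traced_count;

/* Must be notrace: the callback itself must never be traced. */
static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	int bit;

	/* Open-coded recursion guard instead of relying on ftrace_ops_assist_func(). */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	my_traced_count++;

	ftrace_test_recursion_unlock(bit);
}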
7463 | |
7464 | static void |
7465 | ftrace_filter_pid_sched_switch_probe(void *data, bool preempt, |
7466 | struct task_struct *prev, |
7467 | struct task_struct *next, |
7468 | unsigned int prev_state) |
7469 | { |
7470 | struct trace_array *tr = data; |
7471 | struct trace_pid_list *pid_list; |
7472 | struct trace_pid_list *no_pid_list; |
7473 | |
7474 | pid_list = rcu_dereference_sched(tr->function_pids); |
7475 | no_pid_list = rcu_dereference_sched(tr->function_no_pids); |
7476 | |
7477 | if (trace_ignore_this_task(pid_list, no_pid_list, next))
7478 | this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
7479 | FTRACE_PID_IGNORE); |
7480 | else |
7481 | this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
7482 | next->pid); |
7483 | } |
7484 | |
7485 | static void |
7486 | ftrace_pid_follow_sched_process_fork(void *data, |
7487 | struct task_struct *self, |
7488 | struct task_struct *task) |
7489 | { |
7490 | struct trace_pid_list *pid_list; |
7491 | struct trace_array *tr = data; |
7492 | |
7493 | pid_list = rcu_dereference_sched(tr->function_pids); |
7494 | trace_filter_add_remove_task(pid_list, self, task); |
7495 | |
7496 | pid_list = rcu_dereference_sched(tr->function_no_pids); |
7497 | trace_filter_add_remove_task(pid_list, self, task); |
7498 | } |
7499 | |
7500 | static void |
7501 | ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task) |
7502 | { |
7503 | struct trace_pid_list *pid_list; |
7504 | struct trace_array *tr = data; |
7505 | |
7506 | pid_list = rcu_dereference_sched(tr->function_pids); |
7507 | trace_filter_add_remove_task(pid_list, NULL, task); |
7508 | |
7509 | pid_list = rcu_dereference_sched(tr->function_no_pids); |
7510 | trace_filter_add_remove_task(pid_list, NULL, task); |
7511 | } |
7512 | |
7513 | void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) |
7514 | { |
7515 | if (enable) { |
7516 | register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7517 | tr);
7518 | register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
7519 | tr);
7520 | } else {
7521 | unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7522 | tr);
7523 | unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
7524 | tr);
7525 | } |
7526 | } |
7527 | |
7528 | static void clear_ftrace_pids(struct trace_array *tr, int type) |
7529 | { |
7530 | struct trace_pid_list *pid_list; |
7531 | struct trace_pid_list *no_pid_list; |
7532 | int cpu; |
7533 | |
7534 | pid_list = rcu_dereference_protected(tr->function_pids, |
7535 | lockdep_is_held(&ftrace_lock)); |
7536 | no_pid_list = rcu_dereference_protected(tr->function_no_pids, |
7537 | lockdep_is_held(&ftrace_lock)); |
7538 | |
7539 | /* Make sure there's something to do */ |
7540 | if (!pid_type_enabled(type, pid_list, no_pid_list)) |
7541 | return; |
7542 | |
7543 | /* See if the pids still need to be checked after this */ |
7544 | if (!still_need_pid_events(type, pid_list, no_pid_list)) { |
7545 | unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7546 | for_each_possible_cpu(cpu) |
7547 | per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE; |
7548 | } |
7549 | |
7550 | if (type & TRACE_PIDS) |
7551 | rcu_assign_pointer(tr->function_pids, NULL); |
7552 | |
7553 | if (type & TRACE_NO_PIDS) |
7554 | rcu_assign_pointer(tr->function_no_pids, NULL); |
7555 | |
7556 | /* Wait till all users are no longer using pid filtering */ |
7557 | synchronize_rcu(); |
7558 | |
7559 | if ((type & TRACE_PIDS) && pid_list) |
7560 | trace_pid_list_free(pid_list); |
7561 | |
7562 | if ((type & TRACE_NO_PIDS) && no_pid_list) |
7563 | trace_pid_list_free(no_pid_list);
7564 | } |
7565 | |
7566 | void ftrace_clear_pids(struct trace_array *tr) |
7567 | { |
7568 | mutex_lock(&ftrace_lock); |
7569 | |
7570 | clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
7571 | |
7572 | mutex_unlock(&ftrace_lock);
7573 | } |
7574 | |
7575 | static void ftrace_pid_reset(struct trace_array *tr, int type) |
7576 | { |
7577 | mutex_lock(&ftrace_lock); |
7578 | clear_ftrace_pids(tr, type); |
7579 | |
7580 | ftrace_update_pid_func(); |
7581 | ftrace_startup_all(0);
7582 | |
7583 | mutex_unlock(&ftrace_lock);
7584 | } |
7585 | |
7586 | /* Greater than any max PID */ |
7587 | #define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1) |
7588 | |
7589 | static void *fpid_start(struct seq_file *m, loff_t *pos) |
7590 | __acquires(RCU) |
7591 | { |
7592 | struct trace_pid_list *pid_list; |
7593 | struct trace_array *tr = m->private; |
7594 | |
7595 | mutex_lock(&ftrace_lock); |
7596 | rcu_read_lock_sched(); |
7597 | |
7598 | pid_list = rcu_dereference_sched(tr->function_pids); |
7599 | |
7600 | if (!pid_list) |
7601 | return !(*pos) ? FTRACE_NO_PIDS : NULL; |
7602 | |
7603 | return trace_pid_start(pid_list, pos); |
7604 | } |
7605 | |
7606 | static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) |
7607 | { |
7608 | struct trace_array *tr = m->private; |
7609 | struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids); |
7610 | |
7611 | if (v == FTRACE_NO_PIDS) { |
7612 | (*pos)++; |
7613 | return NULL; |
7614 | } |
7615 | return trace_pid_next(pid_list, v, pos); |
7616 | } |
7617 | |
7618 | static void fpid_stop(struct seq_file *m, void *p) |
7619 | __releases(RCU) |
7620 | { |
7621 | rcu_read_unlock_sched(); |
7622 | mutex_unlock(&ftrace_lock);
7623 | } |
7624 | |
7625 | static int fpid_show(struct seq_file *m, void *v) |
7626 | { |
7627 | if (v == FTRACE_NO_PIDS) { |
7628 | seq_puts(m, "no pid\n");
7629 | return 0; |
7630 | } |
7631 | |
7632 | return trace_pid_show(m, v); |
7633 | } |
7634 | |
7635 | static const struct seq_operations ftrace_pid_sops = { |
7636 | .start = fpid_start, |
7637 | .next = fpid_next, |
7638 | .stop = fpid_stop, |
7639 | .show = fpid_show, |
7640 | }; |
7641 | |
7642 | static void *fnpid_start(struct seq_file *m, loff_t *pos) |
7643 | __acquires(RCU) |
7644 | { |
7645 | struct trace_pid_list *pid_list; |
7646 | struct trace_array *tr = m->private; |
7647 | |
7648 | mutex_lock(&ftrace_lock); |
7649 | rcu_read_lock_sched(); |
7650 | |
7651 | pid_list = rcu_dereference_sched(tr->function_no_pids); |
7652 | |
7653 | if (!pid_list) |
7654 | return !(*pos) ? FTRACE_NO_PIDS : NULL; |
7655 | |
7656 | return trace_pid_start(pid_list, pos); |
7657 | } |
7658 | |
7659 | static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos) |
7660 | { |
7661 | struct trace_array *tr = m->private; |
7662 | struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids); |
7663 | |
7664 | if (v == FTRACE_NO_PIDS) { |
7665 | (*pos)++; |
7666 | return NULL; |
7667 | } |
7668 | return trace_pid_next(pid_list, v, pos); |
7669 | } |
7670 | |
7671 | static const struct seq_operations ftrace_no_pid_sops = { |
7672 | .start = fnpid_start, |
7673 | .next = fnpid_next, |
7674 | .stop = fpid_stop, |
7675 | .show = fpid_show, |
7676 | }; |
7677 | |
7678 | static int pid_open(struct inode *inode, struct file *file, int type) |
7679 | { |
7680 | const struct seq_operations *seq_ops; |
7681 | struct trace_array *tr = inode->i_private; |
7682 | struct seq_file *m; |
7683 | int ret = 0; |
7684 | |
7685 | ret = tracing_check_open_get_tr(tr); |
7686 | if (ret) |
7687 | return ret; |
7688 | |
7689 | if ((file->f_mode & FMODE_WRITE) && |
7690 | (file->f_flags & O_TRUNC)) |
7691 | ftrace_pid_reset(tr, type); |
7692 | |
7693 | switch (type) { |
7694 | case TRACE_PIDS: |
7695 | seq_ops = &ftrace_pid_sops; |
7696 | break; |
7697 | case TRACE_NO_PIDS: |
7698 | seq_ops = &ftrace_no_pid_sops; |
7699 | break; |
7700 | default: |
7701 | trace_array_put(tr); |
7702 | WARN_ON_ONCE(1); |
7703 | return -EINVAL; |
7704 | } |
7705 | |
7706 | ret = seq_open(file, seq_ops); |
7707 | if (ret < 0) { |
7708 | trace_array_put(tr); |
7709 | } else { |
7710 | m = file->private_data; |
7711 | /* copy tr over to seq ops */ |
7712 | m->private = tr; |
7713 | } |
7714 | |
7715 | return ret; |
7716 | } |
7717 | |
7718 | static int |
7719 | ftrace_pid_open(struct inode *inode, struct file *file) |
7720 | { |
7721 | return pid_open(inode, file, TRACE_PIDS);
7722 | } |
7723 | |
7724 | static int |
7725 | ftrace_no_pid_open(struct inode *inode, struct file *file) |
7726 | { |
7727 | return pid_open(inode, file, TRACE_NO_PIDS);
7728 | } |
7729 | |
7730 | static void ignore_task_cpu(void *data) |
7731 | { |
7732 | struct trace_array *tr = data; |
7733 | struct trace_pid_list *pid_list; |
7734 | struct trace_pid_list *no_pid_list; |
7735 | |
7736 | /* |
7737 | * This function is called by on_each_cpu() while the |
7738 | * ftrace_lock mutex is held.
7739 | */ |
7740 | pid_list = rcu_dereference_protected(tr->function_pids, |
7741 | mutex_is_locked(&ftrace_lock)); |
7742 | no_pid_list = rcu_dereference_protected(tr->function_no_pids, |
7743 | mutex_is_locked(&ftrace_lock)); |
7744 | |
7745 | if (trace_ignore_this_task(pid_list, no_pid_list, current))
7746 | this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
7747 | FTRACE_PID_IGNORE); |
7748 | else |
7749 | this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
7750 | current->pid); |
7751 | } |
7752 | |
7753 | static ssize_t |
7754 | pid_write(struct file *filp, const char __user *ubuf, |
7755 | size_t cnt, loff_t *ppos, int type) |
7756 | { |
7757 | struct seq_file *m = filp->private_data; |
7758 | struct trace_array *tr = m->private; |
7759 | struct trace_pid_list *filtered_pids; |
7760 | struct trace_pid_list *other_pids; |
7761 | struct trace_pid_list *pid_list; |
7762 | ssize_t ret; |
7763 | |
7764 | if (!cnt) |
7765 | return 0; |
7766 | |
7767 | mutex_lock(&ftrace_lock); |
7768 | |
7769 | switch (type) { |
7770 | case TRACE_PIDS: |
7771 | filtered_pids = rcu_dereference_protected(tr->function_pids, |
7772 | lockdep_is_held(&ftrace_lock)); |
7773 | other_pids = rcu_dereference_protected(tr->function_no_pids, |
7774 | lockdep_is_held(&ftrace_lock)); |
7775 | break; |
7776 | case TRACE_NO_PIDS: |
7777 | filtered_pids = rcu_dereference_protected(tr->function_no_pids, |
7778 | lockdep_is_held(&ftrace_lock)); |
7779 | other_pids = rcu_dereference_protected(tr->function_pids, |
7780 | lockdep_is_held(&ftrace_lock)); |
7781 | break; |
7782 | default: |
7783 | ret = -EINVAL; |
7784 | WARN_ON_ONCE(1); |
7785 | goto out; |
7786 | } |
7787 | |
7788 | ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
7789 | if (ret < 0) |
7790 | goto out; |
7791 | |
7792 | switch (type) { |
7793 | case TRACE_PIDS: |
7794 | rcu_assign_pointer(tr->function_pids, pid_list); |
7795 | break; |
7796 | case TRACE_NO_PIDS: |
7797 | rcu_assign_pointer(tr->function_no_pids, pid_list); |
7798 | break; |
7799 | } |
7800 | |
7801 | |
7802 | if (filtered_pids) { |
7803 | synchronize_rcu(); |
7804 | trace_pid_list_free(filtered_pids);
7805 | } else if (pid_list && !other_pids) { |
7806 | /* Register a probe to set whether to ignore the tracing of a task */ |
7807 | register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7808 | } |
7809 | |
7810 | /* |
7811 | * Ignoring of pids is done at task switch. But we have to |
7812 | * check for those tasks that are currently running. |
7813 | * Always do this in case a pid was appended or removed. |
7814 | */ |
7815 | on_each_cpu(ignore_task_cpu, tr, 1);
7816 | |
7817 | ftrace_update_pid_func(); |
7818 | ftrace_startup_all(0);
7819 | out: |
7820 | mutex_unlock(&ftrace_lock);
7821 | |
7822 | if (ret > 0) |
7823 | *ppos += ret; |
7824 | |
7825 | return ret; |
7826 | } |
7827 | |
7828 | static ssize_t |
7829 | ftrace_pid_write(struct file *filp, const char __user *ubuf, |
7830 | size_t cnt, loff_t *ppos) |
7831 | { |
7832 | return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
7833 | } |
7834 | |
7835 | static ssize_t |
7836 | ftrace_no_pid_write(struct file *filp, const char __user *ubuf, |
7837 | size_t cnt, loff_t *ppos) |
7838 | { |
7839 | return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
7840 | } |
7841 | |
7842 | static int |
7843 | ftrace_pid_release(struct inode *inode, struct file *file) |
7844 | { |
7845 | struct trace_array *tr = inode->i_private; |
7846 | |
7847 | trace_array_put(tr); |
7848 | |
7849 | return seq_release(inode, file); |
7850 | } |
7851 | |
7852 | static const struct file_operations ftrace_pid_fops = { |
7853 | .open = ftrace_pid_open, |
7854 | .write = ftrace_pid_write, |
7855 | .read = seq_read, |
7856 | .llseek = tracing_lseek, |
7857 | .release = ftrace_pid_release, |
7858 | }; |
7859 | |
7860 | static const struct file_operations ftrace_no_pid_fops = { |
7861 | .open = ftrace_no_pid_open, |
7862 | .write = ftrace_no_pid_write, |
7863 | .read = seq_read, |
7864 | .llseek = tracing_lseek, |
7865 | .release = ftrace_pid_release, |
7866 | }; |
7867 | |
7868 | void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer) |
7869 | { |
7870 | trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
7871 | tr, &ftrace_pid_fops);
7872 | trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
7873 | d_tracer, tr, &ftrace_no_pid_fops);
7874 | } |
7875 | |
7876 | void __init ftrace_init_tracefs_toplevel(struct trace_array *tr, |
7877 | struct dentry *d_tracer) |
7878 | { |
7879 | /* Only the top level directory has the dyn_tracefs and profile */ |
7880 | WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); |
7881 | |
7882 | ftrace_init_dyn_tracefs(d_tracer); |
7883 | ftrace_profile_tracefs(d_tracer); |
7884 | } |
7885 | |
7886 | /** |
7887 | * ftrace_kill - kill ftrace |
7888 | * |
7889 | * This function should be used by panic code. It stops ftrace |
7890 | * but in a not so nice way: tracing is shut off immediately, with
7891 | * no synchronization and no clean teardown of registered callbacks.
7892 | */ |
7893 | void ftrace_kill(void) |
7894 | { |
7895 | ftrace_disabled = 1; |
7896 | ftrace_enabled = 0; |
7897 | ftrace_trace_function = ftrace_stub; |
7898 | } |
7899 | |
7900 | /** |
7901 | * ftrace_is_dead - Test if ftrace is dead or not. |
7902 | * |
7903 | * Returns: 1 if ftrace is "dead", zero otherwise. |
7904 | */ |
7905 | int ftrace_is_dead(void) |
7906 | { |
7907 | return ftrace_disabled; |
7908 | } |
7909 | |
7910 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS |
7911 | /* |
7912 | * When registering ftrace_ops with IPMODIFY, it is necessary to make sure |
7913 | * it doesn't conflict with any direct ftrace_ops. If there is existing |
7914 | * direct ftrace_ops on a kernel function being patched, call |
7915 | * FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER on it to enable sharing. |
7916 | * |
7917 | * @ops: ftrace_ops being registered. |
7918 | * |
7919 | * Returns: |
7920 | * 0 on success; |
7921 | * Negative on failure. |
7922 | */ |
7923 | static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops) |
7924 | { |
7925 | struct ftrace_func_entry *entry; |
7926 | struct ftrace_hash *hash; |
7927 | struct ftrace_ops *op; |
7928 | int size, i, ret; |
7929 | |
7930 | lockdep_assert_held_once(&direct_mutex); |
7931 | |
7932 | if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) |
7933 | return 0; |
7934 | |
7935 | hash = ops->func_hash->filter_hash; |
7936 | size = 1 << hash->size_bits; |
7937 | for (i = 0; i < size; i++) { |
7938 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { |
7939 | unsigned long ip = entry->ip; |
7940 | bool found_op = false; |
7941 | |
7942 | mutex_lock(&ftrace_lock); |
7943 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
7944 | if (!(op->flags & FTRACE_OPS_FL_DIRECT)) |
7945 | continue; |
7946 | if (ops_references_ip(op, ip)) {
7947 | found_op = true; |
7948 | break; |
7949 | } |
7950 | } while_for_each_ftrace_op(op); |
7951 | mutex_unlock(&ftrace_lock);
7952 | |
7953 | if (found_op) { |
7954 | if (!op->ops_func) |
7955 | return -EBUSY; |
7956 | |
7957 | ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER); |
7958 | if (ret) |
7959 | return ret; |
7960 | } |
7961 | } |
7962 | } |
7963 | |
7964 | return 0; |
7965 | } |
7966 | |
7967 | /* |
7968 | * Similar to prepare_direct_functions_for_ipmodify, clean up after ops |
7969 | * with IPMODIFY is unregistered. The cleanup is optional for most DIRECT |
7970 | * ops. |
7971 | */ |
7972 | static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops) |
7973 | { |
7974 | struct ftrace_func_entry *entry; |
7975 | struct ftrace_hash *hash; |
7976 | struct ftrace_ops *op; |
7977 | int size, i; |
7978 | |
7979 | if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) |
7980 | return; |
7981 | |
7982 | mutex_lock(&direct_mutex); |
7983 | |
7984 | hash = ops->func_hash->filter_hash; |
7985 | size = 1 << hash->size_bits; |
7986 | for (i = 0; i < size; i++) { |
7987 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { |
7988 | unsigned long ip = entry->ip; |
7989 | bool found_op = false; |
7990 | |
7991 | mutex_lock(&ftrace_lock); |
7992 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
7993 | if (!(op->flags & FTRACE_OPS_FL_DIRECT)) |
7994 | continue; |
7995 | if (ops_references_ip(op, ip)) {
7996 | found_op = true; |
7997 | break; |
7998 | } |
7999 | } while_for_each_ftrace_op(op); |
8000 | mutex_unlock(&ftrace_lock);
8001 | |
8002 | /* The cleanup is optional, ignore any errors */ |
8003 | if (found_op && op->ops_func) |
8004 | op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER); |
8005 | } |
8006 | } |
8007 | mutex_unlock(&direct_mutex);
8008 | } |
8009 | |
8010 | #define lock_direct_mutex() mutex_lock(&direct_mutex) |
8011 | #define unlock_direct_mutex() mutex_unlock(&direct_mutex) |
8012 | |
8013 | #else /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ |
8014 | |
8015 | static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops) |
8016 | { |
8017 | return 0; |
8018 | } |
8019 | |
8020 | static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops) |
8021 | { |
8022 | } |
8023 | |
8024 | #define lock_direct_mutex() do { } while (0) |
8025 | #define unlock_direct_mutex() do { } while (0) |
8026 | |
8027 | #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ |
8028 | |
8029 | /* |
8030 | * Similar to register_ftrace_function, except we don't lock direct_mutex. |
8031 | */ |
8032 | static int register_ftrace_function_nolock(struct ftrace_ops *ops) |
8033 | { |
8034 | int ret; |
8035 | |
8036 | ftrace_ops_init(ops); |
8037 | |
8038 | mutex_lock(&ftrace_lock); |
8039 | |
8040 | ret = ftrace_startup(ops, 0);
8041 | |
8042 | mutex_unlock(&ftrace_lock);
8043 | |
8044 | return ret; |
8045 | } |
8046 | |
8047 | /** |
8048 | * register_ftrace_function - register a function for profiling |
8049 | * @ops: ops structure that holds the function for profiling. |
8050 | * |
8051 | * Register a function to be called by all functions in the |
8052 | * kernel. |
8053 | * |
8054 | * Note: @ops->func and all the functions it calls must be labeled |
8055 | * with "notrace", otherwise it will go into a |
8056 | * recursive loop. |
8057 | */ |
8058 | int register_ftrace_function(struct ftrace_ops *ops) |
8059 | { |
8060 | int ret; |
8061 | |
8062 | lock_direct_mutex(); |
8063 | ret = prepare_direct_functions_for_ipmodify(ops); |
8064 | if (ret < 0) |
8065 | goto out_unlock; |
8066 | |
8067 | ret = register_ftrace_function_nolock(ops); |
8068 | |
8069 | out_unlock: |
8070 | unlock_direct_mutex(); |
8071 | return ret; |
8072 | } |
8073 | EXPORT_SYMBOL_GPL(register_ftrace_function); |
8074 | |
8075 | /** |
8076 | * unregister_ftrace_function - unregister a function for profiling. |
8077 | * @ops: ops structure that holds the function to unregister |
8078 | * |
8079 | * Unregister a function that was added to be called by ftrace profiling. |
8080 | */ |
8081 | int unregister_ftrace_function(struct ftrace_ops *ops) |
8082 | { |
8083 | int ret; |
8084 | |
8085 | mutex_lock(&ftrace_lock); |
8086 | ret = ftrace_shutdown(ops, 0);
8087 | mutex_unlock(&ftrace_lock);
8088 | |
8089 | cleanup_direct_functions_after_ipmodify(ops); |
8090 | return ret; |
8091 | } |
8092 | EXPORT_SYMBOL_GPL(unregister_ftrace_function); |
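
A minimal out-of-tree sketch of the register/unregister API documented above; my_ops, my_trace_callback and the "schedule" filter are illustrative assumptions and not part of ftrace.c:

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/string.h>

static void notrace my_trace_callback(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op,
				      struct ftrace_regs *fregs)
{
	/* Runs on entry of every function that matches the filter. */
}

static struct ftrace_ops my_ops = {
	.func  = my_trace_callback,
	/* Let the core provide recursion protection via ftrace_ops_assist_func(). */
	.flags = FTRACE_OPS_FL_RECURSION,
};

static int __init my_tracer_init(void)
{
	/* Illustrative: trace only schedule() instead of every kernel function. */
	ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
	return register_ftrace_function(&my_ops);
}

static void __exit my_tracer_exit(void)
{
	unregister_ftrace_function(&my_ops);
	ftrace_free_filter(&my_ops);
}

module_init(my_tracer_init);
module_exit(my_tracer_exit);
MODULE_LICENSE("GPL");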
8093 | |
8094 | static int symbols_cmp(const void *a, const void *b) |
8095 | { |
8096 | const char **str_a = (const char **) a; |
8097 | const char **str_b = (const char **) b; |
8098 | |
8099 | return strcmp(*str_a, *str_b); |
8100 | } |
8101 | |
8102 | struct kallsyms_data { |
8103 | unsigned long *addrs; |
8104 | const char **syms; |
8105 | size_t cnt; |
8106 | size_t found; |
8107 | }; |
8108 | |
8109 | /* This function gets called for all kernel and module symbols |
8110 | * and returns 1 in case we resolved all the requested symbols, |
8111 | * 0 otherwise. |
8112 | */ |
8113 | static int kallsyms_callback(void *data, const char *name, unsigned long addr) |
8114 | { |
8115 | struct kallsyms_data *args = data; |
8116 | const char **sym; |
8117 | int idx; |
8118 | |
8119 | sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
8120 | if (!sym) |
8121 | return 0; |
8122 | |
8123 | idx = sym - args->syms; |
8124 | if (args->addrs[idx]) |
8125 | return 0; |
8126 | |
8127 | if (!ftrace_location(addr))
8128 | return 0; |
8129 | |
8130 | args->addrs[idx] = addr; |
8131 | args->found++; |
8132 | return args->found == args->cnt ? 1 : 0; |
8133 | } |
8134 | |
8135 | /** |
8136 | * ftrace_lookup_symbols - Lookup addresses for array of symbols |
8137 | * |
8138 | * @sorted_syms: array of pointers to symbol names to resolve,
8139 | * must be alphabetically sorted
8140 | * @cnt: number of symbols/addresses in @sorted_syms/@addrs arrays
8141 | * @addrs: array for storing resulting addresses |
8142 | * |
8143 | * This function looks up addresses for array of symbols provided in |
8144 | * @sorted_syms array (must be alphabetically sorted) and stores them in
8145 | * @addrs array, which needs to be big enough to store at least @cnt |
8146 | * addresses. |
8147 | * |
8148 | * Returns: 0 if all provided symbols are found, -ESRCH otherwise. |
8149 | */ |
8150 | int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs) |
8151 | { |
8152 | struct kallsyms_data args; |
8153 | int found_all; |
8154 | |
8155 | memset(addrs, 0, sizeof(*addrs) * cnt); |
8156 | args.addrs = addrs; |
8157 | args.syms = sorted_syms; |
8158 | args.cnt = cnt; |
8159 | args.found = 0; |
8160 | |
8161 | found_all = kallsyms_on_each_symbol(kallsyms_callback, &args);
8162 | if (found_all) |
8163 | return 0; |
8164 | found_all = module_kallsyms_on_each_symbol(NULL, kallsyms_callback, &args);
8165 | return found_all ? 0 : -ESRCH; |
8166 | } |
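
A rough usage sketch for the lookup above; resolve_my_symbols and the chosen symbol names are hypothetical, and the input array must already be in strcmp() order:

#include <linux/ftrace.h>
#include <linux/kernel.h>

/* Hypothetical caller: resolve two ftrace-able symbols in one pass. */
static int resolve_my_symbols(void)
{
	static const char *syms[] = { "do_exit", "schedule" };	/* already sorted */
	unsigned long addrs[ARRAY_SIZE(syms)];
	int err;

	err = ftrace_lookup_symbols(syms, ARRAY_SIZE(syms), addrs);
	if (err)
		return err;	/* -ESRCH if any symbol could not be resolved */

	pr_info("schedule() entry found at %px\n", (void *)addrs[1]);
	return 0;
}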
8167 | |
8168 | #ifdef CONFIG_SYSCTL |
8169 | |
8170 | #ifdef CONFIG_DYNAMIC_FTRACE |
8171 | static void ftrace_startup_sysctl(void) |
8172 | { |
8173 | int command; |
8174 | |
8175 | if (unlikely(ftrace_disabled)) |
8176 | return; |
8177 | |
8178 | /* Force update next time */ |
8179 | saved_ftrace_func = NULL; |
8180 | /* ftrace_start_up is true if we want ftrace running */ |
8181 | if (ftrace_start_up) { |
8182 | command = FTRACE_UPDATE_CALLS; |
8183 | if (ftrace_graph_active) |
8184 | command |= FTRACE_START_FUNC_RET; |
8185 | ftrace_startup_enable(command); |
8186 | } |
8187 | } |
8188 | |
8189 | static void ftrace_shutdown_sysctl(void) |
8190 | { |
8191 | int command; |
8192 | |
8193 | if (unlikely(ftrace_disabled)) |
8194 | return; |
8195 | |
8196 | /* ftrace_start_up is true if ftrace is running */ |
8197 | if (ftrace_start_up) { |
8198 | command = FTRACE_DISABLE_CALLS; |
8199 | if (ftrace_graph_active) |
8200 | command |= FTRACE_STOP_FUNC_RET; |
8201 | ftrace_run_update_code(command); |
8202 | } |
8203 | } |
8204 | #else |
8205 | # define ftrace_startup_sysctl() do { } while (0) |
8206 | # define ftrace_shutdown_sysctl() do { } while (0) |
8207 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
8208 | |
8209 | static bool is_permanent_ops_registered(void) |
8210 | { |
8211 | struct ftrace_ops *op; |
8212 | |
8213 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
8214 | if (op->flags & FTRACE_OPS_FL_PERMANENT) |
8215 | return true; |
8216 | } while_for_each_ftrace_op(op); |
8217 | |
8218 | return false; |
8219 | } |
8220 | |
8221 | static int |
8222 | ftrace_enable_sysctl(struct ctl_table *table, int write, |
8223 | void *buffer, size_t *lenp, loff_t *ppos) |
8224 | { |
8225 | int ret = -ENODEV; |
8226 | |
8227 | mutex_lock(&ftrace_lock); |
8228 | |
8229 | if (unlikely(ftrace_disabled)) |
8230 | goto out; |
8231 | |
8232 | ret = proc_dointvec(table, write, buffer, lenp, ppos); |
8233 | |
8234 | if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) |
8235 | goto out; |
8236 | |
8237 | if (ftrace_enabled) { |
8238 | |
8239 | /* we are starting ftrace again */ |
8240 | if (rcu_dereference_protected(ftrace_ops_list, |
8241 | lockdep_is_held(&ftrace_lock)) != &ftrace_list_end) |
8242 | update_ftrace_function(); |
8243 | |
8244 | ftrace_startup_sysctl(); |
8245 | |
8246 | } else { |
8247 | if (is_permanent_ops_registered()) { |
8248 | ftrace_enabled = true; |
8249 | ret = -EBUSY; |
8250 | goto out; |
8251 | } |
8252 | |
8253 | /* stopping ftrace calls (just send to ftrace_stub) */ |
8254 | ftrace_trace_function = ftrace_stub; |
8255 | |
8256 | ftrace_shutdown_sysctl(); |
8257 | } |
8258 | |
8259 | last_ftrace_enabled = !!ftrace_enabled; |
8260 | out: |
8261 | mutex_unlock(&ftrace_lock);
8262 | return ret; |
8263 | } |
8264 | |
8265 | static struct ctl_table ftrace_sysctls[] = { |
8266 | { |
8267 | .procname = "ftrace_enabled", |
8268 | .data = &ftrace_enabled, |
8269 | .maxlen = sizeof(int), |
8270 | .mode = 0644, |
8271 | .proc_handler = ftrace_enable_sysctl, |
8272 | }, |
8273 | {} |
8274 | }; |
8275 | |
8276 | static int __init ftrace_sysctl_init(void) |
8277 | { |
8278 | register_sysctl_init("kernel", ftrace_sysctls); |
8279 | return 0; |
8280 | } |
8281 | late_initcall(ftrace_sysctl_init); |
8282 | #endif |
8283 |