/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header. For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_recursion.h>
#include <linux/trace_clock.h>
#include <linux/jump_label.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

#ifdef CONFIG_TRACING
extern void ftrace_boot_snapshot(void);
#else
static inline void ftrace_boot_snapshot(void) { }
#endif

struct ftrace_ops;
struct ftrace_regs;
struct dyn_ftrace;

char *arch_ftrace_match_adjust(char *str, const char *search);

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
struct fgraph_ret_regs;
unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs);
#else
unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
#endif

#ifdef CONFIG_FUNCTION_TRACER
/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 *
 * Also define the function prototype that these architectures use
 * to call the ftrace_ops_list_func().
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
#else
# define FTRACE_FORCE_LIST_FUNC 0
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
#endif
extern const struct ftrace_ops ftrace_nop_ops;
extern const struct ftrace_ops ftrace_list_ops;
struct ftrace_ops *ftrace_find_unique_ops(struct dyn_ftrace *rec);
#endif /* CONFIG_FUNCTION_TRACER */

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;
struct ftrace_direct_func;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
	defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym)
{
	return NULL;
}
#endif

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported);
#else
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
					 char *type, char *name,
					 char *module_name, int *exported)
{
	return -1;
}
#endif

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS

struct ftrace_regs {
	struct pt_regs		regs;
};
#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)

/*
 * ftrace_regs_set_instruction_pointer() is to be defined by the architecture
 * if it allows setting of the instruction pointer from the ftrace_regs when
 * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports live kernel patching.
 */
#define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
{
	if (!fregs)
		return NULL;

	return arch_ftrace_get_regs(fregs);
}

/*
 * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs.
 * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs.
 */
static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
{
	if (IS_ENABLED(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS))
		return true;

	return ftrace_get_regs(fregs) != NULL;
}

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
#define ftrace_regs_get_instruction_pointer(fregs) \
	instruction_pointer(ftrace_get_regs(fregs))
#define ftrace_regs_get_argument(fregs, n) \
	regs_get_kernel_argument(ftrace_get_regs(fregs), n)
#define ftrace_regs_get_stack_pointer(fregs) \
	kernel_stack_pointer(ftrace_get_regs(fregs))
#define ftrace_regs_return_value(fregs) \
	regs_return_value(ftrace_get_regs(fregs))
#define ftrace_regs_set_return_value(fregs, ret) \
	regs_set_return_value(ftrace_get_regs(fregs), ret)
#define ftrace_override_function_with_return(fregs) \
	override_function_with_return(ftrace_get_regs(fregs))
#define ftrace_regs_query_register_offset(name) \
	regs_query_register_offset(name)
#endif

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct ftrace_regs *fregs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
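
/*
 * Illustrative sketch (the callback name is hypothetical): a callback can
 * use the accessors above without knowing whether the arch provides a
 * full pt_regs for it.
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		if (ftrace_regs_has_args(fregs))
 *			pr_info("traced %ps, first arg: %lx\n", (void *)ip,
 *				ftrace_regs_get_argument(fregs, 0));
 *	}
 */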

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
 * IPMODIFY are attribute flags that can be set only before registering
 * the ftrace_ops and cannot be modified while it is registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function call
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. Registering the ftrace_ops will not
 *            fail, but the regs field will be NULL if the arch does
 *            not support passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will
 *            automatically get set upon registering the ftrace_ops,
 *            if the arch supports it.
 * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback needs recursion protection. If it does
 *            not set this, then the ftrace infrastructure will assume
 *            that the callback can handle recursion on its own.
 * STUB - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first
 *            time register_ftrace_function() is called, it will
 *            initialize the ops).
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocates a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already
 *            registered for any of the functions that this ops will be
 *            registered for, then registering this ops (or setting a
 *            filter IP on it) will fail.
 * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by
 *             ftrace_enabled.
 * DIRECT - Used by the direct ftrace_ops helper for direct functions
 *            (internal ftrace only, should not be used by others)
 */
enum {
	FTRACE_OPS_FL_ENABLED			= BIT(0),
	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
	FTRACE_OPS_FL_RECURSION			= BIT(4),
	FTRACE_OPS_FL_STUB			= BIT(5),
	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
	FTRACE_OPS_FL_DELETED			= BIT(7),
	FTRACE_OPS_FL_ADDING			= BIT(8),
	FTRACE_OPS_FL_REMOVING			= BIT(9),
	FTRACE_OPS_FL_MODIFYING			= BIT(10),
	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
	FTRACE_OPS_FL_PID			= BIT(13),
	FTRACE_OPS_FL_RCU			= BIT(14),
	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
	FTRACE_OPS_FL_PERMANENT			= BIT(16),
	FTRACE_OPS_FL_DIRECT			= BIT(17),
};
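
/*
 * Illustrative sketch (hypothetical names): a minimal ftrace_ops that
 * requests pt_regs when the arch can provide them. The ops must be
 * static and must not be freed after registration.
 *
 *	static void my_func(unsigned long ip, unsigned long parent_ip,
 *			    struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		struct pt_regs *regs = ftrace_get_regs(fregs);
 *
 *		if (regs)	// may be NULL with SAVE_REGS_IF_SUPPORTED
 *			;	// inspect the registers
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_func,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *	};
 *
 *	// at init time: register_ftrace_function(&my_ops);
 */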

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
#define FTRACE_OPS_FL_SAVE_ARGS			FTRACE_OPS_FL_SAVE_REGS
#else
#define FTRACE_OPS_FL_SAVE_ARGS			0
#endif

/*
 * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes
 * to a ftrace_ops. Note, the requests may fail.
 *
 * ENABLE_SHARE_IPMODIFY_SELF - enable a DIRECT ops to work on the same
 *                              function as an ops with IPMODIFY. Called
 *                              when the DIRECT ops is being registered.
 *                              This is called with both direct_mutex and
 *                              ftrace_lock held.
 *
 * ENABLE_SHARE_IPMODIFY_PEER - enable a DIRECT ops to work on the same
 *                              function as an ops with IPMODIFY. Called
 *                              when the other ops (the one with IPMODIFY)
 *                              is being registered.
 *                              This is called with direct_mutex held.
 *
 * DISABLE_SHARE_IPMODIFY_PEER - stop a DIRECT ops from working on the same
 *                              function as an ops with IPMODIFY. Called
 *                              when the other ops (the one with IPMODIFY)
 *                              is being unregistered.
 *                              This is called with direct_mutex held.
 */
enum ftrace_ops_cmd {
	FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF,
	FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER,
	FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER,
};

/*
 * For most ftrace_ops_cmd,
 * Returns:
 *        0 - Success.
 *        Negative on failure. The return value is dependent on the
 *        callback.
 */
typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);
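
/*
 * Illustrative sketch (hypothetical names): an ops_func for a DIRECT ops
 * that agrees to share functions with an IPMODIFY ops only when its
 * trampoline can cope with a modified IP.
 *
 *	static int my_ops_func(struct ftrace_ops *op, enum ftrace_ops_cmd cmd)
 *	{
 *		switch (cmd) {
 *		case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF:
 *		case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
 *			return my_can_share() ? 0 : -EBUSY; // hypothetical check
 *		case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
 *			return 0;
 *		}
 *		return -EINVAL;
 *	}
 */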

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;
	struct ftrace_hash __rcu	*filter_hash;
	struct mutex			regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void)
{
	ftrace_boot_snapshot();
}
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, unregistering it will schedule on all CPUs to make sure
 * that there are no more users. Depending on the load of the system
 * that may take a bit of time.
 *
 * Any private data attached must likewise not be freed while the
 * ftrace_ops may still be in use; if private data is added to a
 * ftrace_ops that is in core code, the user of the ftrace_ops must
 * perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops __rcu		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
	struct list_head		list;
	ftrace_ops_func_t		ops_func;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	unsigned long			direct_call;
#endif
#endif
};

extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;

/*
 * Traverse the ftrace_ops_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_check() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_check() calls are needed to handle
 * concurrent insertions into the ftrace_ops_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_check(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)					\
	while (likely(op = rcu_dereference_raw_check((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))
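
/*
 * Illustrative usage sketch of the iteration macros above:
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		// inspect op, e.g. op->flags
 *	} while_for_each_ftrace_op(op);
 */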

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also be read_mostly.
 * These functions do modify read_mostly variables so use them sparingly.
 * Never free an ftrace_ops or modify the next pointer after it has been
 * registered. Even after unregistering it, the next pointer may still be
 * used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct ftrace_regs *fregs);


int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_FUNCTION_TRACER */

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
	unsigned long direct; /* for direct lookup only */
};

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
unsigned long ftrace_find_rec_direct(unsigned long ip);
int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
			     bool free_filters);
int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr);

void ftrace_stub_direct_tramp(void);

#else
struct ftrace_ops;
# define ftrace_direct_func_count 0
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	return 0;
}
static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
					   bool free_filters)
{
	return -ENODEV;
}
static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}

/*
 * This must be implemented by the architecture.
 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
 * direct call), can inform the architecture's trampoline that this
 * routine has a direct caller, and what the caller is.
 *
 * For example, on x86, it returns the direct caller
 * callback function via the regs->orig_ax parameter.
 * Then in the ftrace trampoline, if this is set, it makes
 * the return from the trampoline jump to the direct caller
 * instead of going back to the function it just traced.
 */
static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
						 unsigned long addr) { }
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
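
/*
 * Illustrative sketch (hypothetical names): attaching a direct trampoline
 * to a single function. The trampoline itself must follow the arch's
 * calling conventions (usually written in assembly); my_tramp is a
 * placeholder for such code.
 *
 *	static struct ftrace_ops my_direct_ops;
 *
 *	ftrace_set_filter_ip(&my_direct_ops, (unsigned long)my_func, 0, 0);
 *	register_ftrace_direct(&my_direct_ops, (unsigned long)my_tramp);
 *	// ...
 *	unregister_ftrace_direct(&my_direct_ops, (unsigned long)my_tramp, true);
 */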

#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
		       size_t *lenp, loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif
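
/*
 * Illustrative usage sketch: pairing the calls within a preempt-disabled
 * region, as the comments above require.
 *
 *	preempt_disable_notrace();
 *	stack_tracer_disable();
 *	// ... code that must not be stack-traced ...
 *	stack_tracer_enable();
 *	preempt_enable_notrace();
 */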

#ifdef CONFIG_DYNAMIC_FTRACE

void ftrace_arch_code_modify_prepare(void);
void ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, 0..FTRACE_REF_MAX, is a counter of the number of
 * callbacks that have registered the function that the dyn_ftrace
 * descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *  DIRECT   - there is a direct function to call
 *  CALL_OPS - the record can use callsite-specific ops
 *  CALL_OPS_EN - the function is set up to use callsite-specific ops
 *  TOUCHED  - A callback was added since boot up
 *  MODIFIED - The function had IPMODIFY or DIRECT attached to it
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
	FTRACE_FL_DIRECT	= (1UL << 24),
	FTRACE_FL_DIRECT_EN	= (1UL << 23),
	FTRACE_FL_CALL_OPS	= (1UL << 22),
	FTRACE_FL_CALL_OPS_EN	= (1UL << 21),
	FTRACE_FL_TOUCHED	= (1UL << 20),
	FTRACE_FL_MODIFIED	= (1UL << 19),
};

#define FTRACE_REF_MAX_SHIFT	19
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & FTRACE_REF_MAX)

struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};
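
/*
 * Illustrative sketch (hypothetical helper): inspecting a record using
 * the flag layout described above.
 *
 *	static void my_dump_rec(struct dyn_ftrace *rec)
 *	{
 *		pr_info("%ps: %lu ops attached%s%s\n", (void *)rec->ip,
 *			ftrace_rec_count(rec),
 *			rec->flags & FTRACE_FL_ENABLED ? " ENABLED" : "",
 *			rec->flags & FTRACE_FL_REGS ? " REGS" : "");
 *	}
 */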

int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
			  unsigned int cnt, int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);

enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
	FTRACE_MAY_SLEEP		= (1 << 5),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE      - The function is already what we want it to be
 *  MAKE_CALL   - Start tracing the function
 *  MODIFY_CALL - Change the call site to use a different trampoline
 *                (e.g. to start or stop saving regs)
 *  MAKE_NOP    - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_PROBES	= (1 << 3),
	FTRACE_ITER_PROBE	= (1 << 4),
	FTRACE_ITER_MOD		= (1 << 5),
	FTRACE_ITER_ENABLED	= (1 << 6),
	FTRACE_ITER_TOUCHED	= (1 << 7),
	FTRACE_ITER_ADDRS	= (1 << 8),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
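
/*
 * Illustrative usage sketch: arch code walking all patchable call sites.
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// e.g. patch the site at rec->ip
 *	}
 */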

int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr.
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);
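
/*
 * Illustrative sketch of the read/compare/write pattern an arch
 * implementation follows. my_arch_make_expected_call(), my_arch_nop()
 * and my_arch_write_insn() are placeholders for the arch's own helpers
 * and safe text-modification primitive.
 *
 *	unsigned char cur[MCOUNT_INSN_SIZE], old[MCOUNT_INSN_SIZE];
 *
 *	my_arch_make_expected_call(addr, old);
 *	if (copy_from_kernel_nofault(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *		return -EFAULT;
 *	if (memcmp(cur, old, MCOUNT_INSN_SIZE))
 *		return -EINVAL;
 *	if (my_arch_write_insn(rec->ip, my_arch_nop()))
 *		return -EPERM;
 *	return 0;
 */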

/**
 * ftrace_need_init_nop - return whether nop call sites should be initialized
 *
 * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
 * need to call ftrace_init_nop() if the code is built with that flag.
 * Architectures where this is not always the case may define their own
 * condition.
 *
 * Return must be:
 *   0       if ftrace_init_nop() should be called
 *   Nonzero if ftrace_init_nop() should not be called
 */

#ifndef ftrace_need_init_nop
#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
#endif

/**
 * ftrace_init_nop - initialize a nop call site
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should contain the contents created by
 * the compiler
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
#ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
#endif

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

#if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) || \
	defined(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the call site record (e.g. mcount/fentry)
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * When using call ops, this is called when the associated ops change, even
 * when (addr == old_addr).
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
					  size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
					   size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifndef ftrace_graph_func
#define ftrace_graph_func ftrace_stub
#define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
#else
#define FTRACE_OPS_GRAPH_STUB 0
#endif
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/* Totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;

	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
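
/*
 * Illustrative usage sketch (the caller provides the synchronization
 * noted above):
 *
 *	int saved = __ftrace_enabled_save();
 *	// ... run code with function tracing disabled ...
 *	__ftrace_enabled_restore(saved);
 */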

/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static __always_inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION	"__patchable_function_entries"
#else
#define FTRACE_CALLSITE_SECTION	"__mcount_loc"
#endif
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	unsigned long retval;
#endif
	int depth;
	/* Number of functions that overran the depth limit for current task */
	unsigned int overrun;
	unsigned long long calltime;
	unsigned long long rettime;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
	trace_func_graph_ent_t		entryfunc;
	trace_func_graph_ret_t		retfunc;
};

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
	unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
		     unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it to be traced by the normal
 * function tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);
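
/*
 * Illustrative sketch (hypothetical names): registering entry/return
 * handlers with the function graph tracer.
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// nonzero: do trace this function
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *		// trace->rettime - trace->calltime is the duration
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	register_ftrace_graph(&my_gops);
 */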

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);

static inline bool ftrace_graph_is_dead(void)
{
	return static_branch_unlikely(&kill_ftrace_graph);
}

extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

/* Define as macros as fgraph_ops may not be defined */
#define register_ftrace_graph(ops) ({ -1; })
#define unregister_ftrace_graph(ops) do { } while (0)

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING
enum ftrace_dump_mode;

#define MAX_TRACER_SIZE	100
extern char ftrace_dump_on_oops[];
extern int ftrace_dump_on_oops_enabled(void);
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */