1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _ASM_X86_FTRACE_H |
3 | #define _ASM_X86_FTRACE_H |
4 | |
5 | #ifdef CONFIG_FUNCTION_TRACER |
6 | #ifndef CC_USING_FENTRY |
7 | # error Compiler does not support fentry? |
8 | #endif |
9 | # define MCOUNT_ADDR ((unsigned long)(__fentry__)) |
10 | #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ |
11 | |
12 | /* Ignore unused weak functions which will have non zero offsets */ |
13 | #ifdef CONFIG_HAVE_FENTRY |
14 | # include <asm/ibt.h> |
15 | /* Add offset for endbr64 if IBT enabled */ |
16 | # define FTRACE_MCOUNT_MAX_OFFSET ENDBR_INSN_SIZE |
17 | #endif |
18 | |
19 | #ifdef CONFIG_DYNAMIC_FTRACE |
20 | #define ARCH_SUPPORTS_FTRACE_OPS 1 |
21 | #endif |
22 | |
23 | #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR |
24 | |
25 | #ifndef __ASSEMBLY__ |
26 | extern void __fentry__(void); |
27 | |
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/*
	 * recordmcount already accounts for any offset between the
	 * recorded address and the mcount call instruction, so the
	 * address needs no further adjustment on x86.
	 */
	return addr;
}
36 | |
37 | #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS |
/*
 * With HAVE_DYNAMIC_FTRACE_WITH_ARGS, ftrace_regs is simply a full
 * pt_regs on x86; whether all of it is valid depends on whether the
 * caller saved the full register set (see arch_ftrace_get_regs()).
 */
struct ftrace_regs {
	struct pt_regs regs;
};
41 | |
42 | static __always_inline struct pt_regs * |
43 | arch_ftrace_get_regs(struct ftrace_regs *fregs) |
44 | { |
45 | /* Only when FL_SAVE_REGS is set, cs will be non zero */ |
46 | if (!fregs->regs.cs) |
47 | return NULL; |
48 | return &fregs->regs; |
49 | } |
50 | |
/* Set/get the traced function's instruction pointer. */
#define ftrace_regs_set_instruction_pointer(fregs, _ip) \
	do { (fregs)->regs.ip = (_ip); } while (0)

#define ftrace_regs_get_instruction_pointer(fregs) \
	((fregs)->regs.ip)

/*
 * Thin wrappers that map the generic ftrace_regs accessors onto the
 * x86 pt_regs helpers (argument registers, stack pointer, return
 * value and register-offset lookup).
 */
#define ftrace_regs_get_argument(fregs, n) \
	regs_get_kernel_argument(&(fregs)->regs, n)
#define ftrace_regs_get_stack_pointer(fregs) \
	kernel_stack_pointer(&(fregs)->regs)
#define ftrace_regs_return_value(fregs) \
	regs_return_value(&(fregs)->regs)
#define ftrace_regs_set_return_value(fregs, ret) \
	regs_set_return_value(&(fregs)->regs, ret)
#define ftrace_override_function_with_return(fregs) \
	override_function_with_return(&(fregs)->regs)
#define ftrace_regs_query_register_offset(name) \
	regs_query_register_offset(name)

struct ftrace_ops;
/* x86 provides its own function-graph entry handler taking ftrace_regs. */
#define ftrace_graph_func ftrace_graph_func
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs);
74 | #else |
75 | #define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR |
76 | #endif |
77 | |
78 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS |
79 | /* |
80 | * When a ftrace registered caller is tracing a function that is |
81 | * also set by a register_ftrace_direct() call, it needs to be |
82 | * differentiated in the ftrace_caller trampoline. To do this, we |
83 | * place the direct caller in the ORIG_AX part of pt_regs. This |
84 | * tells the ftrace_caller that there's a direct caller. |
85 | */ |
/*
 * Record @addr (the direct trampoline) in orig_ax so the ftrace_caller
 * trampoline can detect and jump to the direct caller instead of
 * returning normally.
 */
static inline void
__arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
{
	/* Emulate a call */
	regs->orig_ax = addr;
}
#define arch_ftrace_set_direct_caller(fregs, addr) \
	__arch_ftrace_set_direct_caller(&(fregs)->regs, addr)
94 | #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ |
95 | |
96 | #ifdef CONFIG_DYNAMIC_FTRACE |
97 | |
/* Arch-private per-record data for dynamic ftrace; empty on x86. */
struct dyn_arch_ftrace {
	/* No extra data needed for x86 */
};
101 | |
102 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
103 | #endif /* __ASSEMBLY__ */ |
104 | #endif /* CONFIG_FUNCTION_TRACER */ |
105 | |
106 | |
107 | #ifndef __ASSEMBLY__ |
108 | |
/* Hook the function-graph return path for the function at @ip. */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer);

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
/* Make the ftrace_ops trampoline pages read-only. */
extern void set_ftrace_ops_ro(void);
#else
/* No trampolines without dynamic ftrace: nothing to protect. */
static inline void set_ftrace_ops_ro(void) { }
#endif
117 | |
118 | #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME |
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Compare a kernel symbol against a syscall name while ignoring
	 * the arch entry-point prefix on the symbol: a plain "sys",
	 * "__x64_sys", "__ia32_sys" or "__do_sys" prefix all match the
	 * same "sys_*" name. Both strings are assumed to carry at least
	 * a 3-byte "sys" prefix, as ftrace guarantees.
	 */
	if (!strcmp(sym + 3, name + 3))
		return true;
	if (!strncmp(sym, "__x64_", 6))
		return !strcmp(sym + 9, name + 3);
	if (!strncmp(sym, "__ia32_", 7))
		return !strcmp(sym + 10, name + 3);
	if (!strncmp(sym, "__do_sys", 8))
		return !strcmp(sym + 8, name + 3);
	return false;
}
130 | |
131 | #ifndef COMPILE_OFFSETS |
132 | |
133 | #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION) |
134 | #include <linux/compat.h> |
135 | |
136 | /* |
137 | * Because ia32 syscalls do not map to x86_64 syscall numbers |
138 | * this screws up the trace output when tracing a ia32 task. |
139 | * Instead of reporting bogus syscalls, just do not trace them. |
140 | * |
141 | * If the user really wants these, then they should use the |
142 | * raw syscall tracepoints with filtering. |
143 | */ |
144 | #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1 |
/*
 * True when the current task entered the kernel via a 32-bit syscall.
 * @regs is unused on x86; the generic caller passes it for arches that
 * need it.
 */
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	return in_32bit_syscall();
}
149 | #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */ |
150 | #endif /* !COMPILE_OFFSETS */ |
151 | #endif /* !__ASSEMBLY__ */ |
152 | |
153 | #ifndef __ASSEMBLY__ |
154 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
/*
 * Registers saved by the function-graph return trampoline. Layout must
 * match what the return-trampoline asm pushes (see the accessors below:
 * ax holds the return value, bp the frame pointer; dx is presumably
 * saved for the upper half of 128-bit returns — confirm against the
 * trampoline asm).
 */
struct fgraph_ret_regs {
	unsigned long ax;
	unsigned long dx;
	unsigned long bp;
};
160 | |
/* Return value of the traced function, as saved in ax by the trampoline. */
static inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
{
	return ret_regs->ax;
}
165 | |
/* Frame pointer of the traced function, as saved in bp by the trampoline. */
static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
{
	return ret_regs->bp;
}
170 | #endif /* ifdef CONFIG_FUNCTION_GRAPH_TRACER */ |
171 | #endif |
172 | |
173 | #endif /* _ASM_X86_FTRACE_H */ |
174 | |