1 | /* |
2 | * Dynamic function tracing support. |
3 | * |
4 | * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com> |
5 | * Copyright (C) 2010 Rabin Vincent <rabin@rab.in> |
6 | * |
7 | * For licencing details, see COPYING. |
8 | * |
9 | * Defines low-level handling of mcount calls when the kernel |
10 | * is compiled with the -pg flag. When using dynamic ftrace, the |
11 | * mcount call-sites get patched with NOP till they are enabled. |
12 | * All code mutation routines here are called under stop_machine(). |
13 | */ |
14 | |
15 | #include <linux/ftrace.h> |
16 | #include <linux/uaccess.h> |
17 | #include <linux/module.h> |
18 | #include <linux/stop_machine.h> |
19 | |
20 | #include <asm/cacheflush.h> |
21 | #include <asm/opcodes.h> |
22 | #include <asm/ftrace.h> |
23 | #include <asm/insn.h> |
24 | #include <asm/set_memory.h> |
25 | #include <asm/stacktrace.h> |
26 | #include <asm/patch.h> |
27 | |
28 | /* |
29 | * The compiler emitted profiling hook consists of |
30 | * |
31 | * PUSH {LR} |
32 | * BL __gnu_mcount_nc |
33 | * |
34 | * To turn this combined sequence into a NOP, we need to restore the value of |
35 | * SP before the PUSH. Let's use an ADD rather than a POP into LR, as LR is not |
36 | * modified anyway, and reloading LR from memory is highly likely to be less |
37 | * efficient. |
38 | */ |
#ifdef CONFIG_THUMB2_KERNEL
/* Thumb-2 wide encoding; must be MCOUNT_INSN_SIZE bytes like the BL it replaces */
#define	NOP		0xf10d0d04	/* add.w sp, sp, #4 */
#else
#define	NOP		0xe28dd004	/* add   sp, sp, #4 */
#endif
44 | |
45 | #ifdef CONFIG_DYNAMIC_FTRACE |
46 | |
/*
 * stop_machine() callback: apply the queued ftrace command while all
 * other CPUs are halted. @data points at the int command word.
 */
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}
55 | |
56 | void arch_ftrace_update_code(int command) |
57 | { |
58 | stop_machine(fn: __ftrace_modify_code, data: &command, NULL); |
59 | } |
60 | |
/* Opcode used to disable the mcount call site @rec (see NOP comment above). */
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}
65 | |
66 | void ftrace_caller_from_init(void); |
67 | void ftrace_regs_caller_from_init(void); |
68 | |
/*
 * Redirect call sites living in .init.text to the *_from_init trampoline
 * variants while init memory is still present; otherwise return @addr
 * unchanged. (__ref: deliberately references init symbols — guarded by the
 * is_kernel_inittext()/system_state checks below.)
 */
static unsigned long __ref adjust_address(struct dyn_ftrace *rec,
					  unsigned long addr)
{
	/* Fast path: not an init call site, or initmem already being freed. */
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE) ||
	    system_state >= SYSTEM_FREEING_INITMEM ||
	    likely(!is_kernel_inittext(rec->ip)))
		return addr;
	/* Without WITH_REGS only the plain caller variant exists. */
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) ||
	    addr == (unsigned long)&ftrace_caller)
		return (unsigned long)&ftrace_caller_from_init;
	return (unsigned long)&ftrace_regs_caller_from_init;
}
81 | |
/* No arch-specific preparation needed before patching; intentionally empty. */
void ftrace_arch_code_modify_prepare(void)
{
}
85 | |
/* Called after a batch of code patching completes. */
void ftrace_arch_code_modify_post_process(void)
{
	/* Make sure any TLB misses during machine stop are cleared. */
	flush_tlb_all();
}
91 | |
/*
 * Build a BL instruction at @pc targeting @addr; @warn controls whether an
 * out-of-range target triggers a warning (see arm_gen_branch_link()).
 * Returns 0 if the branch cannot be encoded.
 */
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr,
					 bool warn)
{
	return arm_gen_branch_link(pc, addr, warn);
}
97 | |
98 | static int ftrace_modify_code(unsigned long pc, unsigned long old, |
99 | unsigned long new, bool validate) |
100 | { |
101 | unsigned long replaced; |
102 | |
103 | if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) |
104 | old = __opcode_to_mem_thumb32(old); |
105 | else |
106 | old = __opcode_to_mem_arm(old); |
107 | |
108 | if (validate) { |
109 | if (copy_from_kernel_nofault(dst: &replaced, src: (void *)pc, |
110 | MCOUNT_INSN_SIZE)) |
111 | return -EFAULT; |
112 | |
113 | if (replaced != old) |
114 | return -EINVAL; |
115 | } |
116 | |
117 | __patch_text((void *)pc, new); |
118 | |
119 | return 0; |
120 | } |
121 | |
122 | int ftrace_update_ftrace_func(ftrace_func_t func) |
123 | { |
124 | unsigned long pc; |
125 | unsigned long new; |
126 | int ret; |
127 | |
128 | pc = (unsigned long)&ftrace_call; |
129 | new = ftrace_call_replace(pc, addr: (unsigned long)func, warn: true); |
130 | |
131 | ret = ftrace_modify_code(pc, old: 0, new, validate: false); |
132 | |
133 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS |
134 | if (!ret) { |
135 | pc = (unsigned long)&ftrace_regs_call; |
136 | new = ftrace_call_replace(pc, addr: (unsigned long)func, warn: true); |
137 | |
138 | ret = ftrace_modify_code(pc, old: 0, new, validate: false); |
139 | } |
140 | #endif |
141 | |
142 | return ret; |
143 | } |
144 | |
145 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) |
146 | { |
147 | unsigned long new, old; |
148 | unsigned long ip = rec->ip; |
149 | unsigned long aaddr = adjust_address(rec, addr); |
150 | struct module *mod = NULL; |
151 | |
152 | #ifdef CONFIG_ARM_MODULE_PLTS |
153 | mod = rec->arch.mod; |
154 | #endif |
155 | |
156 | old = ftrace_nop_replace(rec); |
157 | |
158 | new = ftrace_call_replace(pc: ip, addr: aaddr, warn: !mod); |
159 | #ifdef CONFIG_ARM_MODULE_PLTS |
160 | if (!new && mod) { |
161 | aaddr = get_module_plt(mod, ip, aaddr); |
162 | new = ftrace_call_replace(ip, aaddr, true); |
163 | } |
164 | #endif |
165 | |
166 | return ftrace_modify_code(pc: rec->ip, old, new, validate: true); |
167 | } |
168 | |
169 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS |
170 | |
171 | int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, |
172 | unsigned long addr) |
173 | { |
174 | unsigned long new, old; |
175 | unsigned long ip = rec->ip; |
176 | |
177 | old = ftrace_call_replace(pc: ip, addr: adjust_address(rec, addr: old_addr), warn: true); |
178 | |
179 | new = ftrace_call_replace(pc: ip, addr: adjust_address(rec, addr), warn: true); |
180 | |
181 | return ftrace_modify_code(pc: rec->ip, old, new, validate: true); |
182 | } |
183 | |
184 | #endif |
185 | |
186 | int ftrace_make_nop(struct module *mod, |
187 | struct dyn_ftrace *rec, unsigned long addr) |
188 | { |
189 | unsigned long aaddr = adjust_address(rec, addr); |
190 | unsigned long ip = rec->ip; |
191 | unsigned long old; |
192 | unsigned long new; |
193 | int ret; |
194 | |
195 | #ifdef CONFIG_ARM_MODULE_PLTS |
196 | /* mod is only supplied during module loading */ |
197 | if (!mod) |
198 | mod = rec->arch.mod; |
199 | else |
200 | rec->arch.mod = mod; |
201 | #endif |
202 | |
203 | old = ftrace_call_replace(pc: ip, addr: aaddr, |
204 | warn: !IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || !mod); |
205 | #ifdef CONFIG_ARM_MODULE_PLTS |
206 | if (!old && mod) { |
207 | aaddr = get_module_plt(mod, ip, aaddr); |
208 | old = ftrace_call_replace(ip, aaddr, true); |
209 | } |
210 | #endif |
211 | |
212 | new = ftrace_nop_replace(rec); |
213 | /* |
214 | * Locations in .init.text may call __gnu_mcount_mc via a linker |
215 | * emitted veneer if they are too far away from its implementation, and |
216 | * so validation may fail spuriously in such cases. Let's work around |
217 | * this by omitting those from validation. |
218 | */ |
219 | ret = ftrace_modify_code(pc: ip, old, new, validate: !is_kernel_inittext(addr: ip)); |
220 | |
221 | return ret; |
222 | } |
223 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
224 | |
225 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
226 | asmlinkage |
227 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, |
228 | unsigned long frame_pointer, |
229 | unsigned long stack_pointer) |
230 | { |
231 | unsigned long return_hooker = (unsigned long) &return_to_handler; |
232 | unsigned long old; |
233 | |
234 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
235 | return; |
236 | |
237 | if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER)) { |
238 | /* FP points one word below parent's top of stack */ |
239 | frame_pointer += 4; |
240 | } else { |
241 | struct stackframe frame = { |
242 | .fp = frame_pointer, |
243 | .sp = stack_pointer, |
244 | .lr = self_addr, |
245 | .pc = self_addr, |
246 | }; |
247 | if (unwind_frame(&frame) < 0) |
248 | return; |
249 | if (frame.lr != self_addr) |
250 | parent = frame.lr_addr; |
251 | frame_pointer = frame.sp; |
252 | } |
253 | |
254 | old = *parent; |
255 | *parent = return_hooker; |
256 | |
257 | if (function_graph_enter(ret: old, func: self_addr, frame_pointer, NULL)) |
258 | *parent = old; |
259 | } |
260 | |
261 | #ifdef CONFIG_DYNAMIC_FTRACE |
262 | extern unsigned long ftrace_graph_call; |
263 | extern unsigned long ftrace_graph_call_old; |
264 | extern void ftrace_graph_caller_old(void); |
265 | extern unsigned long ftrace_graph_regs_call; |
266 | extern void ftrace_graph_regs_caller(void); |
267 | |
268 | static int __ftrace_modify_caller(unsigned long *callsite, |
269 | void (*func) (void), bool enable) |
270 | { |
271 | unsigned long caller_fn = (unsigned long) func; |
272 | unsigned long pc = (unsigned long) callsite; |
273 | unsigned long branch = arm_gen_branch(pc, caller_fn); |
274 | unsigned long nop = arm_gen_nop(); |
275 | unsigned long old = enable ? nop : branch; |
276 | unsigned long new = enable ? branch : nop; |
277 | |
278 | return ftrace_modify_code(pc, old, new, validate: true); |
279 | } |
280 | |
281 | static int ftrace_modify_graph_caller(bool enable) |
282 | { |
283 | int ret; |
284 | |
285 | ret = __ftrace_modify_caller(callsite: &ftrace_graph_call, |
286 | func: ftrace_graph_caller, |
287 | enable); |
288 | |
289 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS |
290 | if (!ret) |
291 | ret = __ftrace_modify_caller(callsite: &ftrace_graph_regs_call, |
292 | func: ftrace_graph_regs_caller, |
293 | enable); |
294 | #endif |
295 | |
296 | |
297 | return ret; |
298 | } |
299 | |
300 | int ftrace_enable_ftrace_graph_caller(void) |
301 | { |
302 | return ftrace_modify_graph_caller(enable: true); |
303 | } |
304 | |
305 | int ftrace_disable_ftrace_graph_caller(void) |
306 | { |
307 | return ftrace_modify_graph_caller(enable: false); |
308 | } |
309 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
310 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
311 | |