// SPDX-License-Identifier: GPL-2.0
/*
 * Based on arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */

#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>

#include <asm/inst.h>
#include <asm/module.h>

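/*
 * Patch the instruction at 'pc' to 'new'. When 'validate' is set, read back
 * the current instruction first and check that it matches 'old'.
 */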
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new, bool validate)
{
        u32 replaced;

        if (validate) {
                if (larch_insn_read((void *)pc, &replaced))
                        return -EFAULT;

                if (replaced != old)
                        return -EINVAL;
        }

        if (larch_insn_patch_text((void *)pc, new))
                return -EPERM;

        return 0;
}

#ifdef CONFIG_MODULES
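/* The 'bl' instruction can only reach targets within +/-128MB of the callsite. */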
static bool reachable_by_bl(unsigned long addr, unsigned long pc)
{
        long offset = (long)addr - (long)pc;

        return offset >= -SZ_128M && offset < SZ_128M;
}

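/*
 * Return the module's ftrace PLT entry for the requested trampoline address,
 * or NULL if 'addr' is not an ftrace entry point we have a PLT slot for.
 */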
static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
        struct plt_entry *plt = mod->arch.ftrace_trampolines;

        if (addr == FTRACE_ADDR)
                return &plt[FTRACE_PLT_IDX];
        if (addr == FTRACE_REGS_ADDR &&
            IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
                return &plt[FTRACE_REGS_PLT_IDX];

        return NULL;
}

/*
 * Find the address the callsite must branch to in order to reach '*addr'.
 *
 * Due to the limited range of the 'bl' instruction, modules may be placed too
 * far away to branch directly, in which case we must use a PLT.
 *
 * Returns true when '*addr' contains a reachable target address, or has been
 * modified to contain a PLT address. Returns false otherwise.
 */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod, unsigned long *addr)
{
        unsigned long pc = rec->ip + LOONGARCH_INSN_SIZE;
        struct plt_entry *plt;

        /*
         * If a custom trampoline is unreachable, rely on the ftrace_regs_caller
         * trampoline which knows how to indirectly reach that trampoline through
         * ops->direct_call.
         */
        if (*addr != FTRACE_ADDR && *addr != FTRACE_REGS_ADDR && !reachable_by_bl(*addr, pc))
                *addr = FTRACE_REGS_ADDR;

        /*
         * When the target is within range of the 'bl' instruction, use 'addr'
         * as-is and branch to that directly.
         */
        if (reachable_by_bl(*addr, pc))
                return true;

        /*
         * 'mod' is only set at module load time, but if we end up
         * dealing with an out-of-range condition, we can assume it
         * is due to a module being loaded far away from the kernel.
         *
         * NOTE: __module_text_address() must be called with preemption
         * disabled, but we can rely on ftrace_lock to ensure that 'mod'
         * retains its validity throughout the remainder of this code.
         */
        if (!mod) {
                preempt_disable();
                mod = __module_text_address(pc);
                preempt_enable();
        }

        if (WARN_ON(!mod))
                return false;

        plt = get_ftrace_plt(mod, *addr);
        if (!plt) {
                pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
                return false;
        }

        *addr = (unsigned long)plt;
        return true;
}
#else /* !CONFIG_MODULES */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod, unsigned long *addr)
{
        return true;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
        u32 old, new;
        unsigned long pc;

        pc = rec->ip + LOONGARCH_INSN_SIZE;

        if (!ftrace_find_callable_addr(rec, NULL, &addr))
                return -EINVAL;

        if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
                return -EINVAL;

        new = larch_insn_gen_bl(pc, addr);
        old = larch_insn_gen_bl(pc, old_addr);

        return ftrace_modify_code(pc, old, new, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

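/*
 * Redirect the 'bl' at ftrace_call to the new tracer function. The old
 * instruction is not validated here because it depends on whichever tracer
 * was previously installed.
 */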
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        u32 new;
        unsigned long pc;

        pc = (unsigned long)&ftrace_call;
        new = larch_insn_gen_bl(pc, (unsigned long)func);

        return ftrace_modify_code(pc, 0, new, false);
}

/*
 * The compiler has inserted 2 NOPs before the regular function prologue.
 * T series registers are available and safe because of LoongArch's psABI.
 *
 * At runtime, we can replace the first nop with "move t0, ra" and the second
 * one with "bl ftrace_caller" to enable tracing, or restore the nop to
 * disable it. Because the bl clobbers RA, the original RA value is saved in
 * t0 first.
 *
 * Details are:
 *
 * | Compiled   | Disabled               | Enabled                |
 * +------------+------------------------+------------------------+
 * | nop        | move t0, ra            | move t0, ra            |
 * | nop        | nop                    | bl ftrace_caller       |
 * | func_body  | func_body              | func_body              |
 *
 * The RA value will be recovered by ftrace_regs_entry, and restored into RA
 * before returning to the regular function prologue. When a function is not
 * being traced, the "move t0, ra" is not harmful.
 */

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
        u32 old, new;
        unsigned long pc;

        pc = rec->ip;
        old = larch_insn_gen_nop();
        new = larch_insn_gen_move(LOONGARCH_GPR_T0, LOONGARCH_GPR_RA);

        return ftrace_modify_code(pc, old, new, true);
}

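/*
 * Enable tracing for a callsite: replace the nop that follows "move t0, ra"
 * with a 'bl' to the ftrace trampoline (or to a module PLT entry for it).
 */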
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        u32 old, new;
        unsigned long pc;

        pc = rec->ip + LOONGARCH_INSN_SIZE;

        if (!ftrace_find_callable_addr(rec, NULL, &addr))
                return -EINVAL;

        old = larch_insn_gen_nop();
        new = larch_insn_gen_bl(pc, addr);

        return ftrace_modify_code(pc, old, new, true);
}

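/*
 * Disable tracing for a callsite: put the nop back over the 'bl' installed by
 * ftrace_make_call().
 */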
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
        u32 old, new;
        unsigned long pc;

        pc = rec->ip + LOONGARCH_INSN_SIZE;

        if (!ftrace_find_callable_addr(rec, NULL, &addr))
                return -EINVAL;

        new = larch_insn_gen_nop();
        old = larch_insn_gen_bl(pc, addr);

        return ftrace_modify_code(pc, old, new, true);
}

void arch_ftrace_update_code(int command)
{
        command |= FTRACE_MAY_SLEEP;
        ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
        return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
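/*
 * Replace the return address stored at '*parent' with return_to_handler so
 * that the traced function returns through the graph tracer.
 */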
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent)
{
        unsigned long old;
        unsigned long return_hooker = (unsigned long)&return_to_handler;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        old = *parent;

        if (!function_graph_enter(old, self_addr, 0, parent))
                *parent = return_hooker;
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        struct pt_regs *regs = &fregs->regs;
        unsigned long *parent = (unsigned long *)&regs->regs[1];

        prepare_ftrace_return(ip, (unsigned long *)parent);
}
#else
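/*
 * Without ftrace_graph_func() above, the graph tracer is enabled and disabled
 * by patching the instruction at ftrace_graph_call between a nop and a branch
 * to ftrace_graph_caller.
 */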
static int ftrace_modify_graph_caller(bool enable)
{
        u32 branch, nop;
        unsigned long pc, func;
        extern void ftrace_graph_call(void);

        pc = (unsigned long)&ftrace_graph_call;
        func = (unsigned long)&ftrace_graph_caller;

        nop = larch_insn_gen_nop();
        branch = larch_insn_gen_b(pc, func);

        if (enable)
                return ftrace_modify_code(pc, nop, branch, true);
        else
                return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
        return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_KPROBES_ON_FTRACE
/* Ftrace callback handler for kprobes -- called with preemption disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                           struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
        int bit;
        struct pt_regs *regs;
        struct kprobe *p;
        struct kprobe_ctlblk *kcb;

        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (bit < 0)
                return;

        p = get_kprobe((kprobe_opcode_t *)ip);
        if (unlikely(!p) || kprobe_disabled(p))
                goto out;

        regs = ftrace_get_regs(fregs);
        if (!regs)
                goto out;

        kcb = get_kprobe_ctlblk();
        if (kprobe_running()) {
                kprobes_inc_nmissed_count(p);
        } else {
                unsigned long orig_ip = instruction_pointer(regs);

                instruction_pointer_set(regs, ip);

                __this_cpu_write(current_kprobe, p);
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                if (!p->pre_handler || !p->pre_handler(p, regs)) {
                        /*
                         * Emulate singlestep (and also recover regs->csr_era)
                         * as if there is a nop
                         */
                        instruction_pointer_set(regs, (unsigned long)p->addr + MCOUNT_INSN_SIZE);
                        if (unlikely(p->post_handler)) {
                                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                                p->post_handler(p, regs, 0);
                        }
                        instruction_pointer_set(regs, orig_ip);
                }

                /*
                 * If pre_handler returns !0, it changes regs->csr_era. We have to
                 * skip emulating post_handler.
                 */
                __this_cpu_write(current_kprobe, NULL);
        }
out:
        ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

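/*
 * ftrace-based kprobes are emulated entirely in the handler above, so no
 * out-of-line single-step slot is needed.
 */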
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
        p->ainsn.insn = NULL;
        return 0;
}
#endif /* CONFIG_KPROBES_ON_FTRACE */