1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Code for tracing calls in Linux kernel. |
4 | * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de> |
5 | * |
6 | * based on code for x86 which is: |
7 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> |
8 | * |
9 | * future possible enhancements: |
10 | * - add CONFIG_STACK_TRACER |
11 | */ |
12 | |
13 | #include <linux/init.h> |
14 | #include <linux/ftrace.h> |
15 | #include <linux/uaccess.h> |
16 | #include <linux/kprobes.h> |
17 | #include <linux/ptrace.h> |
18 | #include <linux/jump_label.h> |
19 | |
20 | #include <asm/assembly.h> |
21 | #include <asm/sections.h> |
22 | #include <asm/ftrace.h> |
23 | #include <asm/patch.h> |
24 | |
25 | #define __hot __section(".text.hot") |
26 | |
27 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
28 | static DEFINE_STATIC_KEY_FALSE(ftrace_graph_enable); |
29 | |
30 | /* |
31 | * Hook the return address and push it in the stack of return addrs |
32 | * in current thread info. |
33 | */ |
34 | static void __hot prepare_ftrace_return(unsigned long *parent, |
35 | unsigned long self_addr) |
36 | { |
37 | unsigned long old; |
38 | extern int parisc_return_to_handler; |
39 | |
40 | if (unlikely(ftrace_graph_is_dead())) |
41 | return; |
42 | |
43 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
44 | return; |
45 | |
46 | old = *parent; |
47 | |
48 | if (!function_graph_enter(ret: old, func: self_addr, frame_pointer: 0, NULL)) |
49 | /* activate parisc_return_to_handler() as return point */ |
50 | *parent = (unsigned long) &parisc_return_to_handler; |
51 | } |
52 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
53 | |
/* Active tracer callback; set via ftrace_update_ftrace_func() and
 * invoked from ftrace_function_trampoline(). */
static ftrace_func_t ftrace_func;
55 | |
/*
 * Common entry point reached from the patched function prologue.
 * Dispatches to the installed tracer callback and, when the graph
 * tracer is enabled, hooks the caller's return address on the stack.
 *
 * @parent:     return address of the traced function's caller
 * @self_addr:  address of the traced function
 * @org_sp_gr3: original stack pointer (gr3) on entry; used to locate
 *              the saved %rp slot at org_sp_gr3 - RP_OFFSET
 * @fregs:      register snapshot passed through to the tracer callback
 *
 * notrace: must not itself be traced, or we would recurse.
 */
asmlinkage void notrace __hot ftrace_function_trampoline(unsigned long parent,
					unsigned long self_addr,
					unsigned long org_sp_gr3,
					struct ftrace_regs *fregs)
{
	extern struct ftrace_ops *function_trace_op;

	ftrace_func(self_addr, parent, function_trace_op, fregs);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (static_branch_unlikely(&ftrace_graph_enable)) {
		unsigned long *parent_rp;

		/* calculate pointer to %rp in stack */
		parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
		/* sanity check: parent_rp should hold parent; if the
		 * frame layout differs, bail rather than corrupt it */
		if (*parent_rp != parent)
			return;

		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}
80 | |
81 | #if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_FUNCTION_GRAPH_TRACER) |
82 | int ftrace_enable_ftrace_graph_caller(void) |
83 | { |
84 | static_key_enable(key: &ftrace_graph_enable.key); |
85 | return 0; |
86 | } |
87 | |
88 | int ftrace_disable_ftrace_graph_caller(void) |
89 | { |
90 | static_key_enable(key: &ftrace_graph_enable.key); |
91 | return 0; |
92 | } |
93 | #endif |
94 | |
95 | #ifdef CONFIG_DYNAMIC_FTRACE |
/* Install @func as the tracer callback invoked from
 * ftrace_function_trampoline().  Always succeeds. */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}
101 | |
/*
 * Intentional no-op: on parisc the patched call site always enters
 * ftrace_function_trampoline(), which dispatches via the ftrace_func
 * pointer, so nothing needs re-patching when the callback changes.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}
107 | |
108 | unsigned long ftrace_call_adjust(unsigned long addr) |
109 | { |
110 | return addr+(FTRACE_PATCHABLE_FUNCTION_SIZE-1)*4; |
111 | } |
112 | |
113 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) |
114 | { |
115 | u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE]; |
116 | u32 *tramp; |
117 | int size, ret, i; |
118 | void *ip; |
119 | |
120 | #ifdef CONFIG_64BIT |
121 | unsigned long addr2 = |
122 | (unsigned long)dereference_function_descriptor((void *)addr); |
123 | |
124 | u32 ftrace_trampoline[] = { |
125 | 0x73c10208, /* std,ma r1,100(sp) */ |
126 | 0x0c2110c1, /* ldd -10(r1),r1 */ |
127 | 0xe820d002, /* bve,n (r1) */ |
128 | addr2 >> 32, |
129 | addr2 & 0xffffffff, |
130 | 0xe83f1fd7, /* b,l,n .-14,r1 */ |
131 | }; |
132 | |
133 | u32 ftrace_trampoline_unaligned[] = { |
134 | addr2 >> 32, |
135 | addr2 & 0xffffffff, |
136 | 0x37de0200, /* ldo 100(sp),sp */ |
137 | 0x73c13e01, /* std r1,-100(sp) */ |
138 | 0x34213ff9, /* ldo -4(r1),r1 */ |
139 | 0x50213fc1, /* ldd -20(r1),r1 */ |
140 | 0xe820d002, /* bve,n (r1) */ |
141 | 0xe83f1fcf, /* b,l,n .-20,r1 */ |
142 | }; |
143 | |
144 | BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline_unaligned) > |
145 | FTRACE_PATCHABLE_FUNCTION_SIZE); |
146 | #else |
147 | u32 ftrace_trampoline[] = { |
148 | (u32)addr, |
149 | 0x6fc10080, /* stw,ma r1,40(sp) */ |
150 | 0x48213fd1, /* ldw -18(r1),r1 */ |
151 | 0xe820c002, /* bv,n r0(r1) */ |
152 | 0xe83f1fdf, /* b,l,n .-c,r1 */ |
153 | }; |
154 | #endif |
155 | |
156 | BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline) > |
157 | FTRACE_PATCHABLE_FUNCTION_SIZE); |
158 | |
159 | size = sizeof(ftrace_trampoline); |
160 | tramp = ftrace_trampoline; |
161 | |
162 | #ifdef CONFIG_64BIT |
163 | if (rec->ip & 0x4) { |
164 | size = sizeof(ftrace_trampoline_unaligned); |
165 | tramp = ftrace_trampoline_unaligned; |
166 | } |
167 | #endif |
168 | |
169 | ip = (void *)(rec->ip + 4 - size); |
170 | |
171 | ret = copy_from_kernel_nofault(dst: insn, src: ip, size); |
172 | if (ret) |
173 | return ret; |
174 | |
175 | for (i = 0; i < size / 4; i++) { |
176 | if (insn[i] != INSN_NOP) |
177 | return -EINVAL; |
178 | } |
179 | |
180 | __patch_text_multiple(ip, tramp, size); |
181 | return 0; |
182 | } |
183 | |
184 | int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, |
185 | unsigned long addr) |
186 | { |
187 | u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE]; |
188 | int i; |
189 | |
190 | for (i = 0; i < ARRAY_SIZE(insn); i++) |
191 | insn[i] = INSN_NOP; |
192 | |
193 | __patch_text((void *)rec->ip, INSN_NOP); |
194 | __patch_text_multiple((void *)rec->ip + 4 - sizeof(insn), |
195 | insn, sizeof(insn)-4); |
196 | return 0; |
197 | } |
198 | #endif |
199 | |
200 | #ifdef CONFIG_KPROBES_ON_FTRACE |
201 | void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, |
202 | struct ftrace_ops *ops, struct ftrace_regs *fregs) |
203 | { |
204 | struct kprobe_ctlblk *kcb; |
205 | struct pt_regs *regs; |
206 | struct kprobe *p; |
207 | int bit; |
208 | |
209 | bit = ftrace_test_recursion_trylock(ip, parent_ip); |
210 | if (bit < 0) |
211 | return; |
212 | |
213 | regs = ftrace_get_regs(fregs); |
214 | p = get_kprobe(addr: (kprobe_opcode_t *)ip); |
215 | if (unlikely(!p) || kprobe_disabled(p)) |
216 | goto out; |
217 | |
218 | if (kprobe_running()) { |
219 | kprobes_inc_nmissed_count(p); |
220 | goto out; |
221 | } |
222 | |
223 | __this_cpu_write(current_kprobe, p); |
224 | |
225 | kcb = get_kprobe_ctlblk(); |
226 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; |
227 | |
228 | regs->iaoq[0] = ip; |
229 | regs->iaoq[1] = ip + 4; |
230 | |
231 | if (!p->pre_handler || !p->pre_handler(p, regs)) { |
232 | regs->iaoq[0] = ip + 4; |
233 | regs->iaoq[1] = ip + 8; |
234 | |
235 | if (unlikely(p->post_handler)) { |
236 | kcb->kprobe_status = KPROBE_HIT_SSDONE; |
237 | p->post_handler(p, regs, 0); |
238 | } |
239 | } |
240 | __this_cpu_write(current_kprobe, NULL); |
241 | out: |
242 | ftrace_test_recursion_unlock(bit); |
243 | } |
244 | NOKPROBE_SYMBOL(kprobe_ftrace_handler); |
245 | |
/* No out-of-line single-step slot is needed for ftrace-based kprobes;
 * mark the probe accordingly.  Always succeeds. */
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
251 | #endif |
252 | |