1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
2 | /* |
3 | * Split from ftrace_64.S |
4 | */ |
5 | |
6 | #include <linux/export.h> |
7 | #include <linux/magic.h> |
8 | #include <asm/ppc_asm.h> |
9 | #include <asm/asm-offsets.h> |
10 | #include <asm/ftrace.h> |
11 | #include <asm/ppc-opcode.h> |
12 | #include <asm/thread_info.h> |
13 | #include <asm/bug.h> |
14 | #include <asm/ptrace.h> |
15 | |
16 | /* |
17 | * |
18 | * ftrace_caller()/ftrace_regs_caller() is the function that replaces _mcount() |
19 | * when ftrace is active. |
20 | * |
 * We arrive here after a function A calls function B, and we are the trace
 * function for B. When we enter, r1 points to A's stack frame; B has not
 * yet had a chance to allocate one.
24 | * |
25 | * Additionally r2 may point either to the TOC for A, or B, depending on |
26 | * whether B did a TOC setup sequence before calling us. |
27 | * |
28 | * On entry the LR points back to the _mcount() call site, and r0 holds the |
29 | * saved LR as it was on entry to B, ie. the original return address at the |
30 | * call site in A. |
31 | * |
32 | * Our job is to save the register state into a struct pt_regs (on the stack) |
33 | * and then arrange for the ftrace function to be called. |
34 | */ |
/*
 * ftrace_regs_entry \allregs
 *
 * Allocate a minimal stack frame for B plus a pt_regs area, save register
 * state into the pt_regs, and set up the four arguments for the C trace
 * function call: r3 = ip, r4 = parent_ip, r5 = ftrace_ops, r6 = &pt_regs.
 *
 * \allregs == 1: save the full GPR set plus CTR/XER/CR (ftrace_regs_caller).
 * \allregs == 0: save only the registers needed to make the call
 *                (ftrace_caller); pt_regs->msr is zeroed as a marker.
 */
.macro ftrace_regs_entry allregs
/* Create a minimal stack frame for representing B */
PPC_STLU r1, -STACK_FRAME_MIN_SIZE(r1)

/* Create our stack frame + pt_regs */
PPC_STLU r1,-SWITCH_FRAME_SIZE(r1)

/* Save all gprs to pt_regs */
SAVE_GPR(0, r1)
SAVE_GPRS(3, 10, r1)

#ifdef CONFIG_PPC64
/* Save the original return address in A's stack frame */
std r0, LRSAVE+SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE(r1)
/*
 * Ok to continue? If not, bail to ftrace_no_trace, which is entered
 * with both stack frames already allocated and only r0/r3-r10 saved.
 */
lbz r3, PACA_FTRACE_ENABLED(r13)
cmpdi r3, 0
beq ftrace_no_trace
#endif

.if \allregs == 1
SAVE_GPR(2, r1)
SAVE_GPRS(11, 31, r1)
.else
#ifdef CONFIG_LIVEPATCH_64
/* r14 is clobbered below to remember the old NIP, so preserve it */
SAVE_GPR(14, r1)
#endif
.endif

/* Save previous stack pointer (r1), ie. A's stack pointer, as pt_regs->gpr[1] */
addi r8, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
PPC_STL r8, GPR1(r1)

.if \allregs == 1
/* Load special regs for save below */
mfmsr r8
mfctr r9
mfxer r10
mfcr r11
.else
/* Clear MSR to flag as ftrace_caller versus ftrace_regs_caller */
li r8, 0
.endif

/* Get the _mcount() call site out of LR */
mflr r7
/* Save it as pt_regs->nip */
PPC_STL r7, _NIP(r1)
/* Also save it in B's stackframe header for proper unwind */
PPC_STL r7, LRSAVE+SWITCH_FRAME_SIZE(r1)
/* Save the real LR (return address into A) in pt_regs->link */
PPC_STL r0, _LINK(r1)

#ifdef CONFIG_PPC64
/* Save callee's TOC in the ABI compliant location */
std r2, STK_GOT(r1)
LOAD_PACA_TOC() /* get kernel TOC in r2 */
/* Load the current ftrace_ops pointer into r5 */
LOAD_REG_ADDR(r3, function_trace_op)
ld r5,0(r3)
#else
/* 32-bit: load the current ftrace_ops pointer into r5 */
lis r3,function_trace_op@ha
lwz r5,function_trace_op@l(r3)
#endif

#ifdef CONFIG_LIVEPATCH_64
mr r14, r7 /* remember old NIP */
#endif

/* Calculate ip from nip-4 into r3 for call below */
subi r3, r7, MCOUNT_INSN_SIZE

/* Put the original return address in r4 as parent_ip */
mr r4, r0

/* Save special regs (r8 is either the live MSR or the 0 marker) */
PPC_STL r8, _MSR(r1)
.if \allregs == 1
PPC_STL r9, _CTR(r1)
PPC_STL r10, _XER(r1)
PPC_STL r11, _CCR(r1)
.endif

/* Load &pt_regs in r6 for call below */
addi r6, r1, STACK_INT_FRAME_REGS
.endm
120 | |
/*
 * ftrace_regs_exit \allregs
 *
 * Undo ftrace_regs_entry: restore register state from the pt_regs area
 * (honouring any modifications the trace function made to nip/link), pop
 * both stack frames, and branch to the possibly-updated NIP.  \allregs
 * must match the value given to the paired ftrace_regs_entry.
 */
.macro ftrace_regs_exit allregs
/* Load ctr with the possibly modified NIP */
PPC_LL r3, _NIP(r1)
mtctr r3

#ifdef CONFIG_LIVEPATCH_64
/* CR0 survives the GPR restores below and is consumed by the bne- */
cmpd r14, r3 /* has NIP been altered? */
#endif

/* Restore gprs */
.if \allregs == 1
REST_GPRS(2, 31, r1)
.else
REST_GPRS(3, 10, r1)
#ifdef CONFIG_LIVEPATCH_64
REST_GPR(14, r1)
#endif
.endif

/* Restore possibly modified LR */
PPC_LL r0, _LINK(r1)
mtlr r0

#ifdef CONFIG_PPC64
/* Restore callee's TOC */
ld r2, STK_GOT(r1)
#endif

/* Pop our stack frame */
addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE

#ifdef CONFIG_LIVEPATCH_64
/* Based on the cmpd above, if the NIP was altered handle livepatch */
bne- livepatch_handler
#endif
bctr /* jump after _mcount site */
.endm
158 | |
/*
 * Trace entry point used when full register state is wanted (the
 * "regs" variant).  The bl at ftrace_regs_call is patched at runtime
 * by ftrace to invoke the active trace function with
 * (ip, parent_ip, ftrace_ops, &pt_regs) in r3-r6.
 */
_GLOBAL(ftrace_regs_caller)
ftrace_regs_entry 1
/* ftrace_call(r3, r4, r5, r6) */
.globl ftrace_regs_call
ftrace_regs_call:
bl ftrace_stub
ftrace_regs_exit 1
166 | |
/*
 * Lightweight trace entry point: saves only the registers needed for
 * the call (pt_regs->msr == 0 marks this variant).  The bl at
 * ftrace_call is patched at runtime by ftrace to invoke the active
 * trace function with (ip, parent_ip, ftrace_ops, &pt_regs) in r3-r6.
 */
_GLOBAL(ftrace_caller)
ftrace_regs_entry 0
/* ftrace_call(r3, r4, r5, r6) */
.globl ftrace_call
ftrace_call:
bl ftrace_stub
ftrace_regs_exit 0
174 | |
/* Default target for the patchable bl sites above: do nothing, return. */
_GLOBAL(ftrace_stub)
blr
177 | |
#ifdef CONFIG_PPC64
/*
 * Reached from ftrace_regs_entry when PACA_FTRACE_ENABLED is zero.
 * At this point both stack frames exist and only r0/r3-r10 have been
 * saved; r3 was clobbered by the enable check and LR still holds the
 * _mcount call site while r0 still holds A's original return address.
 * Restore r3, pop both frames and return to the call site.
 */
ftrace_no_trace:
mflr r3
mtctr r3
REST_GPR(3, r1)
addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
mtlr r0
bctr
#endif
187 | |
#ifdef CONFIG_LIVEPATCH_64
/*
 * This function runs in the mcount context, between two functions. As
 * such it can only clobber registers which are volatile and used in
 * function linkage.
 *
 * We get here when a function A, calls another function B, but B has
 * been live patched with a new function C.
 *
 * On entry:
 * - we have no stack frame and can not allocate one
 * - LR points back to the original caller (in A)
 * - CTR holds the new NIP in C
 * - r0, r11 & r12 are free
 *
 * The original LR and TOC are parked on a per-thread "livepatch stack"
 * so that they survive the call into C, which will return here via the
 * LR set up by the bctrl below.
 */
livepatch_handler:
ld r12, PACA_THREAD_INFO(r13)

/* Allocate 3 x 8 bytes on the livepatch stack (grows upward) */
ld r11, TI_livepatch_sp(r12)
addi r11, r11, 24
std r11, TI_livepatch_sp(r12)

/* Save toc & real LR on livepatch stack */
std r2, -24(r11)
mflr r12
std r12, -16(r11)

/* Store stack end marker */
lis r12, STACK_END_MAGIC@h
ori r12, r12, STACK_END_MAGIC@l
std r12, -8(r11)

/* Put ctr in r12 for global entry and branch there */
mfctr r12
bctrl

/*
 * Now we are returning from the patched function to the original
 * caller A. We are free to use r11, r12 and we can use r2 until we
 * restore it.
 */

ld r12, PACA_THREAD_INFO(r13)

ld r11, TI_livepatch_sp(r12)

/* Check stack marker hasn't been trashed (trap + bug entry if it has) */
lis r2, STACK_END_MAGIC@h
ori r2, r2, STACK_END_MAGIC@l
ld r12, -8(r11)
1: tdne r12, r2
EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0

/* Restore LR & toc from livepatch stack */
ld r12, -16(r11)
mtlr r12
ld r2, -24(r11)

/* Pop livepatch stack frame */
ld r12, PACA_THREAD_INFO(r13)
subi r11, r11, 24
std r11, TI_livepatch_sp(r12)

/* Return to original caller of live patched function */
blr
#endif /* CONFIG_LIVEPATCH_64 */
255 | |
#ifndef CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY
/*
 * Do-nothing _mcount stub.  On entry LR is the _mcount call site and
 * r0 holds the caller's saved LR (as set up by the _mcount call
 * sequence): restore LR from r0 and branch back to just after the
 * call site via CTR.
 */
_GLOBAL(mcount)
_GLOBAL(_mcount)
EXPORT_SYMBOL(_mcount)
mflr r12
mtctr r12
mtlr r0
bctr
#endif
265 | |
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * The function graph tracer arranges for a traced function to return
 * here instead of to its real caller.  ftrace_return_to_handler()
 * hands back the real return address in r3; we must preserve the
 * traced function's own return values (r3/r4, plus r2 on 64-bit)
 * across that call.
 */
_GLOBAL(return_to_handler)
/* need to save return values */
#ifdef CONFIG_PPC64
std r4, -32(r1)
std r3, -24(r1)
/* save TOC */
std r2, -16(r1)
std r31, -8(r1)
mr r31, r1
stdu r1, -112(r1)

/*
 * We might be called from a module.
 * Switch to our TOC to run inside the core kernel.
 */
LOAD_PACA_TOC()
#else
/* 32-bit: spill return values into a small frame */
stwu r1, -16(r1)
stw r3, 8(r1)
stw r4, 12(r1)
#endif

bl ftrace_return_to_handler
nop /* NOTE(review): looks like a TOC-restore slot after the call — confirm */

/* return value has real return address */
mtlr r3

#ifdef CONFIG_PPC64
ld r1, 0(r1) /* pop via back-chain (same as r31) */
ld r4, -32(r1)
ld r3, -24(r1)
ld r2, -16(r1)
ld r31, -8(r1)
#else
lwz r3, 8(r1)
lwz r4, 12(r1)
addi r1, r1, 16
#endif

/* Jump back to real return address */
blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
310 | |
/*
 * Reserved space (32 bytes, ie. 8 instructions each) for ftrace
 * trampolines installed at runtime.  The .init variant presumably
 * covers init-text call sites and is discarded with init memory —
 * TODO confirm against the linker script.
 */
.pushsection ".tramp.ftrace.text" ,"aw" ,@progbits;
.globl ftrace_tramp_text
ftrace_tramp_text:
.space 32
.popsection

.pushsection ".tramp.ftrace.init" ,"aw" ,@progbits;
.globl ftrace_tramp_init
ftrace_tramp_init:
.space 32
.popsection
322 | |