// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel unwinding support
 *
 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
 *
 * Derived partially from the IA64 implementation. The PA-RISC
 * Runtime Architecture Document is also a useful reference to
 * understand what is happening here
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/sched/task_stack.h>

#include <linux/uaccess.h>
#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#include <asm/unwind.h>
#include <asm/switch_to.h>
#include <asm/sections.h>
#include <asm/ftrace.h>

/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...)	pr_debug(x)
#else
#define dbg(x...)	do { } while (0)
#endif

#define KERNEL_START (KERNEL_BINARY_TEXT_START)

extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];

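/* Protects the list of dynamically registered unwind tables below. */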
static DEFINE_SPINLOCK(unwind_lock);
/*
 * the kernel unwind block is not dynamically allocated so that
 * we can call unwind_init as early in the bootup process as
 * possible (before the slab allocator is initialized)
 */
static struct unwind_table kernel_unwind_table __ro_after_init;
static LIST_HEAD(unwind_tables);

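/*
 * Binary search for the entry whose [region_start, region_end] range
 * contains addr. The table is sorted by region_start and the regions
 * do not overlap, so at most one entry can match.
 */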
static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
	const struct unwind_table_entry *e = NULL;
	unsigned long lo, hi, mid;

	lo = 0;
	hi = table->length - 1;

	while (lo <= hi) {
		mid = (hi - lo) / 2 + lo;
		e = &table->table[mid];
		if (addr < e->region_start)
			hi = mid - 1;
		else if (addr > e->region_end)
			lo = mid + 1;
		else
			return e;
	}

	return NULL;
}

static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
	struct unwind_table *table;
	const struct unwind_table_entry *e = NULL;

	if (addr >= kernel_unwind_table.start &&
	    addr <= kernel_unwind_table.end)
		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
	else {
		unsigned long flags;

		spin_lock_irqsave(&unwind_lock, flags);
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->start &&
			    addr <= table->end)
				e = find_unwind_entry_in_table(table, addr);
			if (e) {
				/* Move-to-front to exploit common traces */
				list_move(&table->list, &unwind_tables);
				break;
			}
		}
		spin_unlock_irqrestore(&unwind_lock, flags);
	}

	return e;
}

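/*
 * Fill in the table descriptor and turn the per-entry region offsets
 * into absolute addresses. Entries are expected to already be sorted
 * by region_start; out-of-order entries are reported but kept.
 */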
static void
unwind_table_init(struct unwind_table *table, const char *name,
		  unsigned long base_addr, unsigned long gp,
		  void *table_start, void *table_end)
{
	struct unwind_table_entry *start = table_start;
	struct unwind_table_entry *end =
		(struct unwind_table_entry *)table_end - 1;

	table->name = name;
	table->base_addr = base_addr;
	table->gp = gp;
	table->start = base_addr + start->region_start;
	table->end = base_addr + end->region_end;
	table->table = (struct unwind_table_entry *)table_start;
	table->length = end - start + 1;
	INIT_LIST_HEAD(&table->list);

	for (; start <= end; start++) {
		if (start < end &&
		    start->region_end > (start+1)->region_start) {
			pr_warn("Out of order unwind entry! %px and %px\n",
				start, start+1);
		}

		start->region_start += base_addr;
		start->region_end += base_addr;
	}
}

static int cmp_unwind_table_entry(const void *a, const void *b)
{
	return ((const struct unwind_table_entry *)a)->region_start
		- ((const struct unwind_table_entry *)b)->region_start;
}

static void
unwind_table_sort(struct unwind_table_entry *start,
		  struct unwind_table_entry *finish)
{
	sort(start, finish - start, sizeof(struct unwind_table_entry),
	     cmp_unwind_table_entry, NULL);
}

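/*
 * Register an additional unwind table, e.g. for a loadable module.
 * The entries are sorted by region_start first so that the binary
 * search in find_unwind_entry_in_table() works on them.
 */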
struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
		 unsigned long gp,
		 void *start, void *end)
{
	struct unwind_table *table;
	unsigned long flags;
	struct unwind_table_entry *s = (struct unwind_table_entry *)start;
	struct unwind_table_entry *e = (struct unwind_table_entry *)end;

	unwind_table_sort(s, e);

	table = kmalloc(sizeof(struct unwind_table), GFP_USER);
	if (table == NULL)
		return NULL;
	unwind_table_init(table, name, base_addr, gp, start, end);
	spin_lock_irqsave(&unwind_lock, flags);
	list_add_tail(&table->list, &unwind_tables);
	spin_unlock_irqrestore(&unwind_lock, flags);

	return table;
}

void unwind_table_remove(struct unwind_table *table)
{
	unsigned long flags;

	spin_lock_irqsave(&unwind_lock, flags);
	list_del(&table->list);
	spin_unlock_irqrestore(&unwind_lock, flags);

	kfree(table);
}

/* Called from setup_arch to import the kernel unwind info */
int __init unwind_init(void)
{
	long start __maybe_unused, stop __maybe_unused;
	register unsigned long gp __asm__ ("r27");

	start = (long)&__start___unwind[0];
	stop = (long)&__stop___unwind[0];

	dbg("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
	    start, stop,
	    (stop - start) / sizeof(struct unwind_table_entry));

	unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
			  gp,
			  &__start___unwind[0], &__stop___unwind[0]);
#if 0
	{
		int i;
		for (i = 0; i < 10; i++)
		{
			printk("region 0x%x-0x%x\n",
				__start___unwind[i].region_start,
				__start___unwind[i].region_end);
		}
	}
#endif
	return 0;
}

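/*
 * On 64-bit PA-RISC a function pointer refers to a function descriptor
 * rather than to the code itself, so dereference it before comparing
 * against the plain code address in pc.
 */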
static bool pc_is_kernel_fn(unsigned long pc, void *fn)
{
	return (unsigned long)dereference_kernel_function_descriptor(fn) == pc;
}

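/*
 * Handle the hand-written assembly entry points (interruption handling,
 * syscall exit, context switch, irq stack switching) which have no usable
 * unwind info; recover the previous sp/ip by hand. Returns 1 if pc was
 * one of these special points and info has been updated.
 */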
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
	/*
	 * We have to use void * instead of a function pointer, because
	 * function pointers aren't a pointer to the function on 64-bit.
	 * Make them const so the compiler knows they live in .text
	 * Note: We could use dereference_kernel_function_descriptor()
	 * instead but we want to keep it simple here.
	 */
	extern void * const ret_from_kernel_thread;
	extern void * const syscall_exit;
	extern void * const intr_return;
	extern void * const _switch_to_ret;
#ifdef CONFIG_IRQSTACKS
	extern void * const _call_on_stack;
#endif /* CONFIG_IRQSTACKS */

	if (pc_is_kernel_fn(pc, handle_interruption)) {
		struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
		dbg("Unwinding through handle_interruption()\n");
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];
		return 1;
	}

	if (pc == (unsigned long)&ret_from_kernel_thread ||
	    pc == (unsigned long)&syscall_exit) {
		info->prev_sp = info->prev_ip = 0;
		return 1;
	}

	if (pc == (unsigned long)&intr_return) {
		struct pt_regs *regs;

		dbg("Found intr_return()\n");
		regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN);
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];
		info->rp = regs->gr[2];
		return 1;
	}

	if (pc_is_kernel_fn(pc, _switch_to) ||
	    pc == (unsigned long)&_switch_to_ret) {
		info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
		info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
		return 1;
	}

#ifdef CONFIG_IRQSTACKS
	if (pc == (unsigned long)&_call_on_stack) {
		info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
		info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
		return 1;
	}
#endif
	return 0;
}

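/*
 * Compute prev_sp/prev_ip for the frame described by info. If unwind
 * info exists for info->ip, the function prologue is decoded to find
 * the frame size and the slot where rp was saved; otherwise we fall
 * back to scanning the kernel stack blindly.
 */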
static void unwind_frame_regs(struct unwind_frame_info *info)
{
	const struct unwind_table_entry *e;
	unsigned long npc;
	unsigned int insn;
	long frame_size = 0;
	int looking_for_rp, rpoffset = 0;

	e = find_unwind_entry(info->ip);
	if (e == NULL) {
		unsigned long sp;

		dbg("Cannot find unwind entry for %pS; forced unwinding\n",
			(void *) info->ip);

		/* Since we are doing the unwinding blind, we don't know if
		   we are adjusting the stack correctly or extracting the rp
		   correctly. The rp is checked to see if it belongs to the
		   kernel text section, if not we assume we don't have a
		   correct stack frame and we continue to unwind the stack.
		   This is not quite correct, and will fail for loadable
		   modules. */
		sp = info->sp & ~63;
		do {
			unsigned long tmp;

			info->prev_sp = sp - 64;
			info->prev_ip = 0;

			/* Check if stack is inside kernel stack area */
			if ((info->prev_sp - (unsigned long) task_stack_page(info->t))
					>= THREAD_SIZE) {
				info->prev_sp = 0;
				break;
			}

			if (copy_from_kernel_nofault(&tmp,
			    (void *)info->prev_sp - RP_OFFSET, sizeof(tmp)))
				break;
			info->prev_ip = tmp;
			sp = info->prev_sp;
		} while (!kernel_text_address(info->prev_ip));

		info->rp = 0;

		dbg("analyzing func @ %lx with no unwind info, setting "
		    "prev_sp=%lx prev_ip=%lx\n", info->ip,
		    info->prev_sp, info->prev_ip);
	} else {
		dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
		    "Save_RP = %d, Millicode = %d size = %u\n",
		    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
		    e->Millicode, e->Total_frame_size);

		looking_for_rp = e->Save_RP;

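		/*
		 * Walk the prologue from the start of the function up to
		 * (but not including) the current ip, adding up the stack
		 * adjustments made on sp and remembering at which offset
		 * rp was stored, so the caller's frame can be located.
		 */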
		for (npc = e->region_start;
		     (frame_size < (e->Total_frame_size << 3) ||
		      looking_for_rp) &&
		     npc < info->ip;
		     npc += 4) {

			insn = *(unsigned int *)npc;

			if ((insn & 0xffffc001) == 0x37de0000 ||
			    (insn & 0xffe00001) == 0x6fc00000) {
				/* ldo X(sp), sp, or stwm X,D(sp) */
				frame_size += (insn & 0x3fff) >> 1;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if ((insn & 0xffe00009) == 0x73c00008) {
				/* std,ma X,D(sp) */
				frame_size += ((insn >> 4) & 0x3ff) << 3;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if (insn == 0x6bc23fd9) {
				/* stw rp,-20(sp) */
				rpoffset = 20;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=stw rp,"
				    "-20(sp) @ %lx\n", info->ip, npc);
			} else if (insn == 0x0fc212c1) {
				/* std rp,-16(sr0,sp) */
				rpoffset = 16;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=std rp,"
				    "-16(sp) @ %lx\n", info->ip, npc);
			}
		}

		if (frame_size > e->Total_frame_size << 3)
			frame_size = e->Total_frame_size << 3;

		if (!unwind_special(info, e->region_start, frame_size)) {
			info->prev_sp = info->sp - frame_size;
			if (e->Millicode)
				info->rp = info->r31;
			else if (rpoffset)
				info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
			info->prev_ip = info->rp;
			info->rp = 0;
		}

		dbg("analyzing func @ %lx, setting prev_sp=%lx "
		    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
		    info->prev_ip, npc);
	}
}

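/*
 * Seed an unwind frame from a register snapshot: sp from gr[30], ip
 * from iaoq[0], rp from gr[2], and r31 which holds the return address
 * for millicode calls.
 */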
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
		       struct pt_regs *regs)
{
	memset(info, 0, sizeof(struct unwind_frame_info));
	info->t = t;
	info->sp = regs->gr[30];
	info->ip = regs->iaoq[0];
	info->rp = regs->gr[2];
	info->r31 = regs->gr[31];

	dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
	    t ? (int)t->pid : -1, info->sp, info->ip);
}

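/*
 * Start unwinding a blocked (sleeping) task from its saved kernel
 * context: a temporary pt_regs copy is built with sp/ip taken from the
 * task's ksp/kpc. If the temporary allocation fails, info is left
 * untouched.
 */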
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
	struct pt_regs *r = &t->thread.regs;
	struct pt_regs *r2;

	r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
	if (!r2)
		return;
	*r2 = *r;
	r2->gr[30] = r->ksp;
	r2->iaoq[0] = r->kpc;
	unwind_frame_init(info, t, r2);
	kfree(r2);
}

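/* Read the current stack pointer (%r30 on PA-RISC). */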
#define get_parisc_stackpointer() ({ \
	unsigned long sp; \
	__asm__("copy %%r30, %0" : "=r"(sp)); \
	(sp); \
})

void unwind_frame_init_task(struct unwind_frame_info *info,
	struct task_struct *task, struct pt_regs *regs)
{
	task = task ? task : current;

	if (task == current) {
		struct pt_regs r;

		if (!regs) {
			memset(&r, 0, sizeof(r));
			r.iaoq[0] = _THIS_IP_;
			r.gr[2] = _RET_IP_;
			r.gr[30] = get_parisc_stackpointer();
			regs = &r;
		}
		unwind_frame_init(info, task, regs);
	} else {
		unwind_frame_init_from_blocked_task(info, task);
	}
}

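/*
 * Step one frame up the stack: compute the previous frame and make it
 * the current one. Returns 0 on success and -1 when no previous frame
 * could be determined (end of stack or unwind failure).
 */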
int unwind_once(struct unwind_frame_info *next_frame)
{
	unwind_frame_regs(next_frame);

	if (next_frame->prev_sp == 0 ||
	    next_frame->prev_ip == 0)
		return -1;

	next_frame->sp = next_frame->prev_sp;
	next_frame->ip = next_frame->prev_ip;
	next_frame->prev_sp = 0;
	next_frame->prev_ip = 0;

	dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
	    next_frame->t ? (int)next_frame->t->pid : -1,
	    next_frame->sp, next_frame->ip);

	return 0;
}

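/*
 * Unwind until a user-space address is reached. Kernel addresses carry
 * privilege level 0 in the two low bits of the IAOQ, so a non-zero
 * (ip & 3) indicates we have left the kernel.
 */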
int unwind_to_user(struct unwind_frame_info *info)
{
	int ret;

	do {
		ret = unwind_once(info);
	} while (!ret && !(info->ip & 3));

	return ret;
}

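/*
 * Return the kernel text address found 'level' frames above the caller,
 * or 0 if the unwind fails or leaves the kernel text. level is bumped
 * by two so that the frames belonging to return_address() and the
 * unwinder setup itself are skipped.
 */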
unsigned long return_address(unsigned int level)
{
	struct unwind_frame_info info;

	/* initialize unwind info */
	unwind_frame_init_task(&info, current, NULL);

	/* unwind stack */
	level += 2;
	do {
		if (unwind_once(&info) < 0 || info.ip == 0)
			return 0;
		if (!kernel_text_address(info.ip))
			return 0;
	} while (info.ip && level--);

	return info.ip;
}
