// SPDX-License-Identifier: GPL-2.0-only
#include <linux/objtool.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/orc_types.h>
#include <asm/orc_lookup.h>

#define orc_warn(fmt, ...) \
	printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)

#define orc_warn_current(args...)					\
({									\
	if (state->task == current && !state->error)			\
		orc_warn(args);						\
})

extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];

static bool orc_init __ro_after_init;
static unsigned int lookup_num_blocks __ro_after_init;

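/*
 * Each .orc_unwind_ip entry is a signed 32-bit offset relative to the entry's
 * own address, which halves the table's size on 64-bit kernels.  Adding the
 * entry's address recovers the absolute text address it describes; e.g. an
 * entry at 0xffffffff82000100 storing -0x1000100 denotes text address
 * 0xffffffff81000000.
 */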
static inline unsigned long orc_ip(const int *ip)
{
	return (unsigned long)ip + *ip;
}

static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
				    unsigned int num_entries, unsigned long ip)
{
	int *first = ip_table;
	int *last = ip_table + num_entries - 1;
	int *mid = first, *found = first;

	if (!num_entries)
		return NULL;

	/*
	 * Do a binary range search to find the rightmost duplicate of a given
	 * starting address.  Some entries are section terminators which are
	 * "weak" entries for ensuring there are no gaps.  They should be
	 * ignored when they conflict with a real entry.
	 */
	while (first <= last) {
		mid = first + ((last - first) / 2);

		if (orc_ip(mid) <= ip) {
			found = mid;
			first = mid + 1;
		} else
			last = mid - 1;
	}

	return u_table + (found - ip_table);
}

#ifdef CONFIG_MODULES
static struct orc_entry *orc_module_find(unsigned long ip)
{
	struct module *mod;

	mod = __module_address(ip);
	if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
		return NULL;
	return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind,
			  mod->arch.num_orcs, ip);
}
#else
static struct orc_entry *orc_module_find(unsigned long ip)
{
	return NULL;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);

/*
 * Ftrace dynamic trampolines do not have orc entries of their own.
 * But they are copies of the static ftrace entries defined in
 * ftrace_*.S, which do have orc entries.
 *
 * If the unwinder comes across an ftrace trampoline, find the ftrace
 * function that was used to create it, and use that function's orc
 * entry, as the placement of the return code on the stack will be
 * identical.
 */
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	struct ftrace_ops *ops;
	unsigned long tramp_addr, offset;

	ops = ftrace_ops_trampoline(ip);
	if (!ops)
		return NULL;

	/* Set tramp_addr to the start of the code copied by the trampoline */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
		tramp_addr = (unsigned long)ftrace_regs_caller;
	else
		tramp_addr = (unsigned long)ftrace_caller;

	/* Now advance tramp_addr to ip's offset within the trampoline */
	offset = ip - ops->trampoline;
	tramp_addr += offset;

	/* Prevent unlikely recursion */
	if (ip == tramp_addr)
		return NULL;

	return orc_find(tramp_addr);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	return NULL;
}
#endif

/*
 * If we crash with IP==0, the last successfully executed instruction
 * was probably an indirect function call with a NULL function pointer,
 * and we don't have unwind information for NULL.
 * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
 * pointer into its parent and then continue normally from there.
 */
static struct orc_entry null_orc_entry = {
	.sp_offset = sizeof(long),
	.sp_reg = ORC_REG_SP,
	.bp_reg = ORC_REG_UNDEFINED,
	.type = UNWIND_HINT_TYPE_CALL
};
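/*
 * At IP==0 the faulting CALL has just pushed its return address, so the
 * caller's IP is at *SP and the caller's SP is SP + sizeof(long) -- which is
 * exactly what the entry above encodes.
 */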

#ifdef CONFIG_CALL_THUNKS
static struct orc_entry *orc_callthunk_find(unsigned long ip)
{
	if (!is_callthunk((void *)ip))
		return NULL;

	return &null_orc_entry;
}
#else
static struct orc_entry *orc_callthunk_find(unsigned long ip)
{
	return NULL;
}
#endif

/* Fake frame pointer entry -- used as a fallback for generated code */
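/*
 * sp_reg/sp_offset encode "previous SP = BP + 16" (skipping the saved RBP and
 * the return address), and bp_reg/bp_offset encode "saved RBP lives at
 * prev SP - 16", i.e. the standard "push %rbp; mov %rsp, %rbp" frame layout.
 */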
static struct orc_entry orc_fp_entry = {
	.type = UNWIND_HINT_TYPE_CALL,
	.sp_reg = ORC_REG_BP,
	.sp_offset = 16,
	.bp_reg = ORC_REG_PREV_SP,
	.bp_offset = -16,
	.end = 0,
};

static struct orc_entry *orc_find(unsigned long ip)
{
	static struct orc_entry *orc;

	if (ip == 0)
		return &null_orc_entry;

	/* For non-init vmlinux addresses, use the fast lookup table: */
	if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
		unsigned int idx, start, stop;

		idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;

		if (unlikely((idx >= lookup_num_blocks-1))) {
			orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
				 idx, lookup_num_blocks, (void *)ip);
			return NULL;
		}

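		/*
		 * orc_lookup[idx + 1] is the entry covering the first address
		 * of the *next* block.  That entry may also cover the tail
		 * end of this block, so widen the search range by one (+1)
		 * to include it.
		 */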
		start = orc_lookup[idx];
		stop = orc_lookup[idx + 1] + 1;

		if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
			     (__start_orc_unwind + stop > __stop_orc_unwind))) {
			orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
				 idx, lookup_num_blocks, start, stop, (void *)ip);
			return NULL;
		}

		return __orc_find(__start_orc_unwind_ip + start,
				  __start_orc_unwind + start, stop - start, ip);
	}

	/* vmlinux .init slow lookup: */
	if (is_kernel_inittext(ip))
		return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);

	/* Module lookup: */
	orc = orc_module_find(ip);
	if (orc)
		return orc;

	orc = orc_ftrace_find(ip);
	if (orc)
		return orc;

	return orc_callthunk_find(ip);
}

#ifdef CONFIG_MODULES

static DEFINE_MUTEX(sort_mutex);
static int *cur_orc_ip_table = __start_orc_unwind_ip;
static struct orc_entry *cur_orc_table = __start_orc_unwind;

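/*
 * Because .orc_unwind_ip entries are self-relative, an entry that moves to a
 * new slot is now relative to a different address, so each swapped offset
 * must be rebased by the distance (delta) between the two slots.
 */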
static void orc_sort_swap(void *_a, void *_b, int size)
{
	struct orc_entry *orc_a, *orc_b;
	struct orc_entry orc_tmp;
	int *a = _a, *b = _b, tmp;
	int delta = _b - _a;

	/* Swap the .orc_unwind_ip entries: */
	tmp = *a;
	*a = *b + delta;
	*b = tmp - delta;

	/* Swap the corresponding .orc_unwind entries: */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	orc_b = cur_orc_table + (b - cur_orc_ip_table);
	orc_tmp = *orc_a;
	*orc_a = *orc_b;
	*orc_b = orc_tmp;
}

static int orc_sort_cmp(const void *_a, const void *_b)
{
	struct orc_entry *orc_a;
	const int *a = _a, *b = _b;
	unsigned long a_val = orc_ip(a);
	unsigned long b_val = orc_ip(b);

	if (a_val > b_val)
		return 1;
	if (a_val < b_val)
		return -1;

	/*
	 * The "weak" section terminator entries need to always be on the left
	 * to ensure the lookup code skips them in favor of real entries.
	 * These terminator entries exist to handle any gaps created by
	 * whitelisted .o files which didn't get objtool generation.
	 */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
}
void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
			void *_orc, size_t orc_size)
{
	int *orc_ip = _orc_ip;
	struct orc_entry *orc = _orc;
	unsigned int num_entries = orc_ip_size / sizeof(int);

	WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
		     orc_size % sizeof(*orc) != 0 ||
		     num_entries != orc_size / sizeof(*orc));

	/*
	 * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
	 * associate an .orc_unwind_ip table entry with its corresponding
	 * .orc_unwind entry so they can both be swapped.
	 */
	mutex_lock(&sort_mutex);
	cur_orc_ip_table = orc_ip;
	cur_orc_table = orc;
	sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
	mutex_unlock(&sort_mutex);

	mod->arch.orc_unwind_ip = orc_ip;
	mod->arch.orc_unwind = orc;
	mod->arch.num_orcs = num_entries;
}
#endif

void __init unwind_init(void)
{
	size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
	size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
	size_t num_entries = orc_ip_size / sizeof(int);
	struct orc_entry *orc;
	int i;

	if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
	    orc_size % sizeof(struct orc_entry) != 0 ||
	    num_entries != orc_size / sizeof(struct orc_entry)) {
		orc_warn("WARNING: Bad or missing .orc_unwind table. Disabling unwinder.\n");
		return;
	}

	/*
	 * Note, the orc_unwind and orc_unwind_ip tables were already
	 * sorted at build time via the 'sorttable' tool.
	 * It's ready for binary search straight away, no need to sort it.
	 */

	/* Initialize the fast lookup table: */
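	/*
	 * For each LOOKUP_BLOCK_SIZE-byte block of vmlinux text, record the
	 * index of the ORC entry covering the block's first address.  A later
	 * lookup for an ip then only needs to binary-search the entries
	 * between two consecutive orc_lookup[] values.
	 */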
	lookup_num_blocks = orc_lookup_end - orc_lookup;
	for (i = 0; i < lookup_num_blocks-1; i++) {
		orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				 num_entries,
				 LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
		if (!orc) {
			orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n");
			return;
		}

		orc_lookup[i] = orc - __start_orc_unwind;
	}

	/* Initialize the ending block: */
	orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries,
			 LOOKUP_STOP_IP);
	if (!orc) {
		orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n");
		return;
	}
	orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;

	orc_init = true;
}

unsigned long unwind_get_return_address(struct unwind_state *state)
{
	if (unwind_done(state))
		return 0;

	return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);

unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
{
	if (unwind_done(state))
		return NULL;

	if (state->regs)
		return &state->regs->ip;

	if (state->sp)
		return (unsigned long *)state->sp - 1;

	return NULL;
}

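/*
 * Check whether [addr, addr + len) lies on a stack the unwinder may read:
 * first the stack currently being walked, and failing that, the next stack
 * in the chain (e.g. when crossing from an IRQ stack to the task stack),
 * as tracked by get_stack_info() and the visit mask.
 */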
static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
			    size_t len)
{
	struct stack_info *info = &state->stack_info;
	void *addr = (void *)_addr;

	if (on_stack(info, addr, len))
		return true;

	return !get_stack_info(addr, state->task, info, &state->stack_mask) &&
		on_stack(info, addr, len);
}

static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
			    unsigned long *val)
{
	if (!stack_access_ok(state, addr, sizeof(long)))
		return false;

	*val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
	return true;
}

static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
			     unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (struct pt_regs *)addr;

	/* x86-32 support will be more complicated due to the &regs->sp hack */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));

	if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
		return false;

	*ip = READ_ONCE_NOCHECK(regs->ip);
	*sp = READ_ONCE_NOCHECK(regs->sp);
	return true;
}

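/*
 * An IRET frame holds only the last five pt_regs fields (ip, cs, flags, sp,
 * ss), so back the pt_regs pointer up by IRET_FRAME_OFFSET to make regs->ip
 * and regs->sp land on the partial frame.
 */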
static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
				  unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;

	if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
		return false;

	*ip = READ_ONCE_NOCHECK(regs->ip);
	*sp = READ_ONCE_NOCHECK(regs->sp);
	return true;
}

/*
 * If state->regs is non-NULL, and points to a full pt_regs, just get the reg
 * value from state->regs.
 *
 * Otherwise, if state->regs just points to IRET regs, and the previous frame
 * had full regs, it's safe to get the value from the previous regs.  This can
 * happen when early/late IRQ entry code gets interrupted by an NMI.
 */
static bool get_reg(struct unwind_state *state, unsigned int reg_off,
		    unsigned long *val)
{
	/* Convert the pt_regs byte offset into a longword index: */
	unsigned int reg = reg_off/8;

	if (!state->regs)
		return false;

	if (state->full_regs) {
		*val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]);
		return true;
	}

	if (state->prev_regs) {
		*val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]);
		return true;
	}

	return false;
}
bool unwind_next_frame(struct unwind_state *state)
{
	unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
	enum stack_type prev_type = state->stack_info.type;
	struct orc_entry *orc;
	bool indirect = false;

	if (unwind_done(state))
		return false;

	/* Don't let modules unload while we're reading their ORC data. */
	preempt_disable();

	/* End-of-stack check for user tasks: */
	if (state->regs && user_mode(state->regs))
		goto the_end;

	/*
	 * Find the orc_entry associated with the text address.
	 *
	 * For a call frame (as opposed to a signal frame), state->ip points to
	 * the instruction after the call.  That instruction's stack layout
	 * could be different from the call instruction's layout, for example
	 * if the call was to a noreturn function.  So get the ORC data for the
	 * call instruction itself.
	 */
	orc = orc_find(state->signal ? state->ip : state->ip - 1);
	if (!orc) {
		/*
		 * As a fallback, try to assume this code uses a frame pointer.
		 * This is useful for generated code, like BPF, which ORC
		 * doesn't know about.  This is just a guess, so the rest of
		 * the unwind is no longer considered reliable.
		 */
		orc = &orc_fp_entry;
		state->error = true;
	}

	/* End-of-stack check for kernel threads: */
	if (orc->sp_reg == ORC_REG_UNDEFINED) {
		if (!orc->end)
			goto err;

		goto the_end;
	}

	state->signal = orc->signal;

	/* Find the previous frame's stack: */
	switch (orc->sp_reg) {
	case ORC_REG_SP:
		sp = state->sp + orc->sp_offset;
		break;

	case ORC_REG_BP:
		sp = state->bp + orc->sp_offset;
		break;

	case ORC_REG_SP_INDIRECT:
		sp = state->sp;
		indirect = true;
		break;

	case ORC_REG_BP_INDIRECT:
		sp = state->bp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_R10:
		if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
			orc_warn_current("missing R10 value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_R13:
		if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
			orc_warn_current("missing R13 value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DI:
		if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
			orc_warn_current("missing RDI value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DX:
		if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
			orc_warn_current("missing DX value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	default:
		orc_warn("unknown SP base reg %d at %pB\n",
			 orc->sp_reg, (void *)state->ip);
		goto err;
	}

	if (indirect) {
		if (!deref_stack_reg(state, sp, &sp))
			goto err;

		if (orc->sp_reg == ORC_REG_SP_INDIRECT)
			sp += orc->sp_offset;
	}

	/* Find IP, SP and possibly regs: */
	switch (orc->type) {
	case UNWIND_HINT_TYPE_CALL:
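		/* The return address sits one word below the previous SP: */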
		ip_p = sp - sizeof(long);

		if (!deref_stack_reg(state, ip_p, &state->ip))
			goto err;

		state->ip = unwind_recover_ret_addr(state, state->ip,
						    (unsigned long *)ip_p);
		state->sp = sp;
		state->regs = NULL;
		state->prev_regs = NULL;
		break;

	case UNWIND_HINT_TYPE_REGS:
		if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn_current("can't access registers at %pB\n",
					 (void *)orig_ip);
			goto err;
		}
		/*
		 * There is a small chance of being interrupted at the entry
		 * of arch_rethook_trampoline(), where no ORC info exists.
		 * That point is right after the RET to
		 * arch_rethook_trampoline(), whose return address on the
		 * stack was modified.  At that point, the @addr_p passed to
		 * unwind_recover_rethook() (which must point to the stack
		 * entry storing the modified return address) is
		 * "SP - (one stack entry)", because SP has already been
		 * incremented by the RET.
		 */
		state->ip = unwind_recover_rethook(state, state->ip,
				(unsigned long *)(state->sp - sizeof(long)));
		state->regs = (struct pt_regs *)sp;
		state->prev_regs = NULL;
		state->full_regs = true;
		break;

	case UNWIND_HINT_TYPE_REGS_PARTIAL:
		if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn_current("can't access iret registers at %pB\n",
					 (void *)orig_ip);
			goto err;
		}
		/* See UNWIND_HINT_TYPE_REGS case comment. */
		state->ip = unwind_recover_rethook(state, state->ip,
				(unsigned long *)(state->sp - sizeof(long)));

		if (state->full_regs)
			state->prev_regs = state->regs;
		state->regs = (void *)sp - IRET_FRAME_OFFSET;
		state->full_regs = false;
		break;

	default:
		orc_warn("unknown .orc_unwind entry type %d at %pB\n",
			 orc->type, (void *)orig_ip);
		goto err;
	}

	/* Find BP: */
	switch (orc->bp_reg) {
	case ORC_REG_UNDEFINED:
		if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
			state->bp = tmp;
		break;

	case ORC_REG_PREV_SP:
		if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
			goto err;
		break;

	case ORC_REG_BP:
		if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
			goto err;
		break;

	default:
		orc_warn("unknown BP base reg %d for ip %pB\n",
			 orc->bp_reg, (void *)orig_ip);
		goto err;
	}

	/* Prevent a recursive loop due to bad ORC data: */
	if (state->stack_info.type == prev_type &&
	    on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
	    state->sp <= prev_sp) {
		orc_warn_current("stack going in the wrong direction? at %pB\n",
				 (void *)orig_ip);
		goto err;
	}

	preempt_enable();
	return true;

err:
	state->error = true;

the_end:
	preempt_enable();
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);

void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long *first_frame)
{
	memset(state, 0, sizeof(*state));
	state->task = task;

	if (!orc_init)
		goto err;

	/*
	 * Refuse to unwind the stack of a task while it's executing on another
	 * CPU.  This check is racy, but that's ok: the unwinder has other
	 * checks to prevent it from going off the rails.
	 */
	if (task_on_another_cpu(task))
		goto err;

	if (regs) {
		if (user_mode(regs))
			goto the_end;

		state->ip = regs->ip;
		state->sp = regs->sp;
		state->bp = regs->bp;
		state->regs = regs;
		state->full_regs = true;
		state->signal = true;

	} else if (task == current) {
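		/* Sample this function's own ip, sp and bp: */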
		asm volatile("lea (%%rip), %0\n\t"
			     "mov %%rsp, %1\n\t"
			     "mov %%rbp, %2\n\t"
			     : "=r" (state->ip), "=r" (state->sp),
			       "=r" (state->bp));

	} else {
		struct inactive_task_frame *frame = (void *)task->thread.sp;

		state->sp = task->thread.sp + sizeof(*frame);
		state->bp = READ_ONCE_NOCHECK(frame->bp);
		state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
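		/*
		 * ret_from_fork is the task's starting ip rather than a
		 * return address after a CALL, so tell orc_find() not to
		 * look up ip - 1 for it:
		 */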
		state->signal = (void *)state->ip == ret_from_fork;
	}

	if (get_stack_info((unsigned long *)state->sp, state->task,
			   &state->stack_info, &state->stack_mask)) {
		/*
		 * We weren't on a valid stack.  It's possible that
		 * we overflowed a valid stack into a guard page.
		 * See if the next page up is valid so that we can
		 * generate some kind of backtrace if this happens.
		 */
		void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
		state->error = true;
		if (get_stack_info(next_page, state->task, &state->stack_info,
				   &state->stack_mask))
			return;
	}

	/*
	 * The caller can provide the address of the first frame directly
	 * (first_frame) or indirectly (regs->sp) to indicate which stack frame
	 * to start unwinding at.  Skip ahead until we reach it.
	 */

	/* When starting from regs, skip the regs frame: */
	if (regs) {
		unwind_next_frame(state);
		return;
	}

	/* Otherwise, skip ahead to the user-specified starting frame: */
	while (!unwind_done(state) &&
	       (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
		state->sp <= (unsigned long)first_frame))
		unwind_next_frame(state);

	return;

err:
	state->error = true;
the_end:
	state->stack_info.type = STACK_TYPE_UNKNOWN;
}
EXPORT_SYMBOL_GPL(__unwind_start);