//===-- sanitizer_stacktrace.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_stacktrace.h"

#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_platform.h"
#include "sanitizer_ptrauth.h"

namespace __sanitizer {

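// Returns the address of the instruction that follows the one at |pc|, or 0
// if the instruction size cannot be determined (possible only on RISC-V).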
uptr StackTrace::GetNextInstructionPc(uptr pc) {
#if defined(__aarch64__)
  return STRIP_PAC_PC((void *)pc) + 4;
#elif defined(__sparc__) || defined(__mips__)
  return pc + 8;
#elif SANITIZER_RISCV64
  // Current check order is 4 -> 2 -> 6 -> 8
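  // The instruction length is encoded in the low bits of its first byte (see
  // the bit patterns in the comments below). Worked example: a first byte of
  // 0x13 satisfies (0x13 & 0x3) == 0x3 and (0x13 & 0x1c) == 0x10 != 0x1c, so
  // the instruction is decoded as a standard 32-bit one.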
  u8 InsnByte = *(u8 *)(pc);
  if (((InsnByte & 0x3) == 0x3) && ((InsnByte & 0x1c) != 0x1c)) {
    // xxxxxxxxxxxbbb11 | 32 bit | bbb != 111
    return pc + 4;
  }
  if ((InsnByte & 0x3) != 0x3) {
    // xxxxxxxxxxxxxxaa | 16 bit | aa != 11
    return pc + 2;
  }
  // RISC-V encoding allows instructions to be up to 8 bytes long
  if ((InsnByte & 0x3f) == 0x1f) {
    // xxxxxxxxxx011111 | 48 bit |
    return pc + 6;
  }
  if ((InsnByte & 0x7f) == 0x3f) {
    // xxxxxxxxx0111111 | 64 bit |
    return pc + 8;
  }
  // Bail out if we could not figure out the instruction size.
  return 0;
#elif SANITIZER_S390 || SANITIZER_I386 || SANITIZER_X32 || SANITIZER_X64
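  // These targets use variable-length instruction encodings; a one-byte step
  // is enough here and mirrors the one-byte adjustment that
  // GetPreviousInstructionPc() applies in the opposite direction for the same
  // targets.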
  return pc + 1;
#else
  return pc + 4;
#endif
}

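// Returns the return address of the GetCurrentPc() call itself, i.e.
// (approximately) the current PC of the caller, as reported by
// GET_CALLER_PC().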
uptr StackTrace::GetCurrentPc() {
  return GET_CALLER_PC();
}

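// Copies |cnt| already-collected PCs from |pcs| into the trace buffer; if
// |extra_top_pc| is non-zero, it is stored as one additional frame after them.
// top_frame_bp is reset since the copied trace has no associated frame pointer.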
void BufferedStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
  size = cnt + !!extra_top_pc;
  CHECK_LE(size, kStackTraceMax);
  internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
  if (extra_top_pc)
    trace_buffer[cnt] = extra_top_pc;
  top_frame_bp = 0;
}

// Sparc implementation is in its own file.
#if !defined(__sparc__)

// With GCC on ARM, bp points to the saved lr rather than the saved fp, so we
// must check the next stack cell for a saved frame pointer. GetCanonicFrame
// returns a pointer to the saved frame pointer in either case.
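// (Concretely: with the Clang/LLVM frame layout the saved fp lives at [bp] and
// the saved lr at [bp + 1 word]; with the GCC layout bp points at the saved
// lr, so the saved fp is one word lower, at [bp - 1 word].)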
static inline uhwptr *GetCanonicFrame(uptr bp,
                                      uptr stack_top,
                                      uptr stack_bottom) {
  CHECK_GT(stack_top, stack_bottom);
#ifdef __arm__
  if (!IsValidFrame(bp, stack_top, stack_bottom)) return 0;
  uhwptr *bp_prev = (uhwptr *)bp;
  if (IsValidFrame((uptr)bp_prev[0], stack_top, stack_bottom)) return bp_prev;
  // The next frame pointer does not look right. This could be a GCC frame, step
  // back by 1 word and try again.
  if (IsValidFrame((uptr)bp_prev[-1], stack_top, stack_bottom))
    return bp_prev - 1;
  // Nope, this does not look right either. This means the frame after next does
  // not have a valid frame pointer, but we can still extract the caller PC.
  // Unfortunately, there is no way to decide between GCC and LLVM frame
  // layouts. Assume LLVM.
  return bp_prev;
#else
  return (uhwptr*)bp;
#endif
}

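// Fast, frame-pointer-based unwinder: records |pc| as the top frame, then
// walks the chain of saved frame pointers starting at |bp|, collecting return
// addresses until the chain leaves [stack_bottom, stack_top), becomes
// misaligned, or |max_depth| frames have been collected.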
void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
                                    uptr stack_bottom, u32 max_depth) {
  // TODO(yln): add arg sanity check for stack_top/stack_bottom
  CHECK_GE(max_depth, 2);
  const uptr kPageSize = GetPageSizeCached();
  trace_buffer[0] = pc;
  size = 1;
  if (stack_top < 4096) return;  // Sanity check for stack top.
  uhwptr *frame = GetCanonicFrame(bp, stack_top, stack_bottom);
  // Lowest possible address that makes sense as the next frame pointer.
  // Goes up as we walk the stack.
  uptr bottom = stack_bottom;
  // Avoid infinite loop when frame == frame[0] by using frame > prev_frame.
  while (IsValidFrame((uptr)frame, stack_top, bottom) &&
         IsAligned((uptr)frame, sizeof(*frame)) &&
         size < max_depth) {
#ifdef __powerpc__
    // PowerPC ABIs specify that the return address is saved at offset
    // 16 of the *caller's* stack frame. Thus we must dereference the
    // back chain to find the caller frame before extracting it.
    uhwptr *caller_frame = (uhwptr*)frame[0];
    if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
        !IsAligned((uptr)caller_frame, sizeof(uhwptr)))
      break;
    uhwptr pc1 = caller_frame[2];
#elif defined(__s390__)
    uhwptr pc1 = frame[14];
#elif defined(__loongarch__) || defined(__riscv)
    // frame[-1] contains the return address
    uhwptr pc1 = frame[-1];
#else
    uhwptr pc1 = STRIP_PAC_PC((void *)frame[1]);
#endif
    // Let's assume that any pointer in the 0th page (i.e. <0x1000 on i386 and
    // x86_64) is invalid and stop unwinding here.  If we're adding support for
    // a platform where this isn't true, we need to reconsider this check.
    if (pc1 < kPageSize)
      break;
    if (pc1 != pc) {
      trace_buffer[size++] = (uptr) pc1;
    }
    bottom = (uptr)frame;
#if defined(__loongarch__) || defined(__riscv)
    // frame[-2] contains the fp of the previous frame
    uptr new_bp = (uptr)frame[-2];
#else
    uptr new_bp = (uptr)frame[0];
#endif
    frame = GetCanonicFrame(new_bp, stack_top, bottom);
  }
}
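
// Illustrative sketch only: callers (the tool-specific runtimes) typically
// drive the fast unwinder roughly like this, with stack_top/stack_bottom
// taken from the current thread's descriptor:
//   BufferedStackTrace stack;
//   stack.UnwindFast(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(),
//                    stack_top, stack_bottom, /*max_depth=*/kStackTraceMax);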

#endif  // !defined(__sparc__)

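// Drops the |count| topmost (innermost) frames from the trace and shifts the
// remaining frames to the front of the buffer.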
void BufferedStackTrace::PopStackFrames(uptr count) {
  CHECK_LT(count, size);
  size -= count;
  for (uptr i = 0; i < size; ++i) {
    trace_buffer[i] = trace_buffer[i + count];
  }
}

static uptr Distance(uptr a, uptr b) { return a < b ? b - a : a - b; }

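// Returns the index of the frame whose PC is closest to |pc|.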
uptr BufferedStackTrace::LocatePcInTrace(uptr pc) {
  uptr best = 0;
  for (uptr i = 1; i < size; ++i) {
    if (Distance(trace[i], pc) < Distance(trace[best], pc)) best = i;
  }
  return best;
}

}  // namespace __sanitizer

