//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
///
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

# include <dlfcn.h>
# include <elf.h>
# include <errno.h>
# include <link.h>
# include <pthread.h>
# include <signal.h>
# include <stdio.h>
# include <stdlib.h>
# include <sys/prctl.h>
# include <sys/resource.h>
# include <sys/time.h>
# include <unistd.h>
# include <unwind.h>

# include "hwasan.h"
# include "hwasan_dynamic_shadow.h"
# include "hwasan_interface_internal.h"
# include "hwasan_mapping.h"
# include "hwasan_report.h"
# include "hwasan_thread.h"
# include "hwasan_thread_list.h"
# include "sanitizer_common/sanitizer_common.h"
# include "sanitizer_common/sanitizer_procmaps.h"
# include "sanitizer_common/sanitizer_stackdepot.h"

// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
//   Not currently tested.
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
//   Integration tests downstream exist.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
//   Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
//   Tested with check-hwasan on aarch64-linux-android.
# if !SANITIZER_ANDROID
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
# endif

namespace __hwasan {

// With the zero shadow base we can not actually map pages starting from 0.
// This constant is somewhat arbitrary.
constexpr uptr kZeroBaseShadowStart = 0;
constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;

static void ProtectGap(uptr addr, uptr size) {
  __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
                          kZeroBaseMaxShadowStart);
}

uptr kLowMemStart;
uptr kLowMemEnd;
uptr kHighMemStart;
uptr kHighMemEnd;

static void PrintRange(uptr start, uptr end, const char *name) {
  Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}

static void PrintAddressSpaceLayout() {
  PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
  if (kHighShadowEnd + 1 < kHighMemStart)
    PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
  else
    CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
  PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
  if (kLowShadowEnd + 1 < kHighShadowStart)
    PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowShadowEnd + 1, kHighShadowStart);
  PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
  if (kLowMemEnd + 1 < kLowShadowStart)
    PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
  PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
  CHECK_EQ(0, kLowMemStart);
}

static uptr GetHighMemEnd() {
  // HighMem covers the upper part of the address space.
  uptr max_address = GetMaxUserVirtualAddress();
  // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
  // properly aligned:
  max_address |= (GetMmapGranularity() << kShadowScale) - 1;
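  // For example, with a 4 KiB mmap granularity and kShadowScale == 4, this ORs
  // in the low 16 bits, so kHighMemEnd ends at a 64 KiB boundary and the
  // derived kHighShadowEnd is the last byte of a shadow page.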
  return max_address;
}

static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
  if (flags()->fixed_shadow_base != (uptr)-1) {
    __hwasan_shadow_memory_dynamic_address = flags()->fixed_shadow_base;
  } else {
    __hwasan_shadow_memory_dynamic_address =
        FindDynamicShadowStart(shadow_size_bytes);
  }
}

static void MaybeDieIfNoTaggingAbi(const char *message) {
  if (!flags()->fail_without_syscall_abi)
    return;
  Printf("FATAL: %s\n", message);
  Die();
}

# define PR_SET_TAGGED_ADDR_CTRL 55
# define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
# define ARCH_GET_UNTAG_MASK 0x4001
# define ARCH_ENABLE_TAGGED_ADDR 0x4002
# define ARCH_GET_MAX_TAG_BITS 0x4003
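// The PR_* values are defined here in case the system's <sys/prctl.h> is too
// old to provide them; the ARCH_* values come from the proposed x86 LAM kernel
// interface referenced in CanUseTaggingAbi() below.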

static bool CanUseTaggingAbi() {
# if defined(__x86_64__)
  unsigned long num_bits = 0;
  // Check for x86 LAM support. This API is based on a currently unsubmitted
  // patch to the Linux kernel (as of August 2022) and is thus subject to
  // change. The patch is here:
  // https://lore.kernel.org/all/20220815041803.17954-1-kirill.shutemov@linux.intel.com/
  //
  // arch_prctl(ARCH_GET_MAX_TAG_BITS, &bits) returns the maximum number of tag
  // bits the user can request, or zero if LAM is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_MAX_TAG_BITS,
                                           reinterpret_cast<uptr>(&num_bits))))
    return false;
  // The platform must provide enough bits for HWASan tags.
  if (num_bits < kTagBits)
    return false;
  return true;
# else
  // Check for ARM TBI support.
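  // The PR_GET_TAGGED_ADDR_CTRL prctl is only implemented on kernels that
  // support the tagged address ABI (arm64 since roughly Linux 5.4), so a
  // successful call implies the ABI is available.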
  return !internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
# endif  // __x86_64__
}

static bool EnableTaggingAbi() {
# if defined(__x86_64__)
  // Enable x86 LAM tagging for the process.
  //
  // arch_prctl(ARCH_ENABLE_TAGGED_ADDR, bits) enables tagging if the number of
  // tag bits requested by the user does not exceed that provided by the system.
  // arch_prctl(ARCH_GET_UNTAG_MASK, &mask) returns the mask of significant
  // address bits. It is ~0ULL if either LAM is disabled for the process or LAM
  // is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_ENABLE_TAGGED_ADDR, kTagBits)))
    return false;
  unsigned long mask = 0;
  // Make sure the tag bits are where we expect them to be.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_UNTAG_MASK,
                                           reinterpret_cast<uptr>(&mask))))
    return false;
  // @mask has ones for non-tag bits, whereas @kAddressTagMask has ones for tag
  // bits. Therefore these masks must not overlap.
  if (mask & kAddressTagMask)
    return false;
  return true;
# else
  // Enable ARM TBI tagging for the process. If for some reason tagging is not
  // supported, prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE) returns
  // -EINVAL.
  if (internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
                                      PR_TAGGED_ADDR_ENABLE, 0, 0, 0)))
    return false;
  // Ensure that TBI is enabled.
  if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) !=
      PR_TAGGED_ADDR_ENABLE)
    return false;
  return true;
# endif  // __x86_64__
}

void InitializeOsSupport() {
  // Check we're running on a kernel that can use the tagged address ABI.
  bool has_abi = CanUseTaggingAbi();

  if (!has_abi) {
# if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
    // Some older Android kernels have the tagged pointer ABI enabled
    // unconditionally, and hence don't have the tagged-addr prctl while still
    // allowing the ABI.
    // If targeting Android and the prctl is not around we assume this is the
    // case.
    return;
# else
    MaybeDieIfNoTaggingAbi(
        "HWAddressSanitizer requires a kernel with tagged address ABI.");
# endif
  }

  if (EnableTaggingAbi())
    return;

# if SANITIZER_ANDROID
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n"
      "Check the `sysctl abi.tagged_addr_disabled` configuration.");
# else
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n");
# endif
}

bool InitShadow() {
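  // The resulting layout, from low to high addresses, is roughly:
  //   [LowMem][LowShadow][gap][HighShadow][gap][HighMem]
  // with LowShadow starting at __hwasan_shadow_memory_dynamic_address.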
  // Define the entire memory range.
  kHighMemEnd = GetHighMemEnd();

  // Determine shadow memory base offset.
  InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));

  // Place the low memory first.
  kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
  kLowMemStart = 0;

  // Define the low shadow based on the already placed low memory.
  kLowShadowEnd = MemToShadow(kLowMemEnd);
  kLowShadowStart = __hwasan_shadow_memory_dynamic_address;
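  // MemToShadow(addr) is (addr >> kShadowScale) + the dynamic shadow base, so
  // each (1 << kShadowScale)-byte granule of application memory is described
  // by one shadow byte.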

  // High shadow takes whatever memory is left up there (making sure it is not
  // interfering with low memory in the fixed case).
  kHighShadowEnd = MemToShadow(kHighMemEnd);
  kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;

  // High memory starts where allocated shadow allows.
  kHighMemStart = ShadowToMem(kHighShadowStart);

  // Check the sanity of the defined memory ranges (there might be gaps).
  CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
  CHECK_GT(kHighMemStart, kHighShadowEnd);
  CHECK_GT(kHighShadowEnd, kHighShadowStart);
  CHECK_GT(kHighShadowStart, kLowMemEnd);
  CHECK_GT(kLowMemEnd, kLowMemStart);
  CHECK_GT(kLowShadowEnd, kLowShadowStart);
  CHECK_GT(kLowShadowStart, kLowMemEnd);

  if (Verbosity())
    PrintAddressSpaceLayout();

  // Reserve shadow memory.
  ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
  ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");

  // Protect all the gaps.
  ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
  if (kLowMemEnd + 1 < kLowShadowStart)
    ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
  if (kLowShadowEnd + 1 < kHighShadowStart)
    ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
  if (kHighShadowEnd + 1 < kHighMemStart)
    ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);

  return true;
}

void InitThreads() {
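  // Carve out a (1 << kShadowBaseAlignment)-byte block immediately below the
  // shadow base for per-thread state (the hwasan thread list), separated from
  // the shadow by a guard mapping that is protected below.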
  CHECK(__hwasan_shadow_memory_dynamic_address);
  uptr guard_page_size = GetMmapGranularity();
  uptr thread_space_start =
      __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
  uptr thread_space_end =
      __hwasan_shadow_memory_dynamic_address - guard_page_size;
  ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
                           "hwasan threads", /*madvise_shadow*/ false);
  ProtectGap(thread_space_end,
             __hwasan_shadow_memory_dynamic_address - thread_space_end);
  InitThreadList(thread_space_start, thread_space_end - thread_space_start);
  hwasanThreadList().CreateCurrentThread();
}

bool MemIsApp(uptr p) {
  // Memory outside the alias range has non-zero tags.
# if !defined(HWASAN_ALIASING_MODE)
  CHECK_EQ(GetTagFromPointer(p), 0);
# endif

  return (p >= kHighMemStart && p <= kHighMemEnd) ||
         (p >= kLowMemStart && p <= kLowMemEnd);
}

void InstallAtExitHandler() { atexit(HwasanAtExit); }

// ---------------------- TSD ---------------- {{{1

# if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void HwasanTSDThreadInit() {
  if (tsd_key_inited)
    CHECK_EQ(0, pthread_setspecific(tsd_key,
                                    (void *)GetPthreadDestructorIterations()));
}

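// pthread runs TSD destructors up to PTHREAD_DESTRUCTOR_ITERATIONS times; the
// slot value counts the remaining iterations so that __hwasan_thread_exit is
// only called on the last pass.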
void HwasanTSDDtor(void *tsd) {
  uptr iterations = (uptr)tsd;
  if (iterations > 1) {
    CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
    return;
  }
  __hwasan_thread_exit();
}

void HwasanTSDInit() {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
# else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
# endif

# if SANITIZER_ANDROID
uptr *GetCurrentThreadLongPtr() { return (uptr *)get_android_tls_ptr(); }
# else
uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
# endif

# if SANITIZER_ANDROID
void AndroidTestTlsSlot() {
  uptr kMagicValue = 0x010203040A0B0C0D;
  uptr *tls_ptr = GetCurrentThreadLongPtr();
  uptr old_value = *tls_ptr;
  *tls_ptr = kMagicValue;
  dlerror();
  if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
    Printf(
        "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
        "for dlerror().\n");
    Die();
  }
  *tls_ptr = old_value;
}
# else
void AndroidTestTlsSlot() {}
# endif

static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
  // Access type is passed in a platform dependent way (see below) and encoded
  // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is
  // recoverable. Valid values of Y are 0 to 4, which are interpreted as
  // log2(access_size), and 0xF, which means that access size is passed via
  // platform dependent register (see below).
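  // For example, code 0x12 denotes a non-recoverable 4-byte store, while 0x2f
  // denotes a recoverable load whose size is passed in a register.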
# if defined(__aarch64__)
  // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
  // access size is stored in X1 register. Access address is always in X0
  // register.
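  // The A64 BRK instruction keeps its 16-bit immediate in bits [20:5], hence
  // the shift and mask when decoding the faulting instruction below.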
  uptr pc = (uptr)info->si_addr;
  const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
  if ((code & 0xff00) != 0x900)
    return AccessInfo{};  // Not ours.

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.regs[0];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;

# elif defined(__x86_64__)
  // Access type is encoded in the instruction following INT3 as
  // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // RSI register. Access address is always in RDI register.
  uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
  uint8_t *nop = (uint8_t *)pc;
  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
      *(nop + 3) < 0x40)
    return AccessInfo{};  // Not ours.
  const unsigned code = *(nop + 3);

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;

# elif SANITIZER_RISCV64
  // Access type is encoded in the instruction following EBREAK as
  // ADDI x0, x0, [0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // X11 register. Access address is always in X10 register.
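  // With the compressed extension the trap may be the 2-byte c.ebreak (0x9002)
  // rather than the 4-byte ebreak (0x100073), so both encodings are handled.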
  uptr pc = (uptr)uc->uc_mcontext.__gregs[REG_PC];
  uint8_t byte1 = *((u8 *)(pc + 0));
  uint8_t byte2 = *((u8 *)(pc + 1));
  uint8_t byte3 = *((u8 *)(pc + 2));
  uint8_t byte4 = *((u8 *)(pc + 3));
  uint32_t ebreak = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
  bool isEbreak = (ebreak == 0x100073);
  bool isShortEbreak = false;
# if defined(__riscv_compressed)
  isFaultShort = ((ebreak & 0x3) != 0x3);
  isShortEbreak = ((ebreak & 0xffff) == 0x9002);
# endif
  // faulted insn is not ebreak, not our case
  if (!(isEbreak || isShortEbreak))
    return AccessInfo{};
  // advance pc to point after ebreak and reconstruct addi instruction
  pc += isFaultShort ? 2 : 4;
  byte1 = *((u8 *)(pc + 0));
  byte2 = *((u8 *)(pc + 1));
  byte3 = *((u8 *)(pc + 2));
  byte4 = *((u8 *)(pc + 3));
  // reconstruct instruction
  uint32_t instr = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  // check if this is really 32 bit instruction
  // code is encoded in top 12 bits, since instruction is supposed to be with
  // imm
  const unsigned code = (instr >> 20) & 0xffff;
  const uptr addr = uc->uc_mcontext.__gregs[10];
  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not our case
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.__gregs[11] : 1U << size_log;

# else
# error Unsupported architecture
# endif

  return AccessInfo{addr, size, is_store, !is_store, recover};
}

static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
  AccessInfo ai = GetAccessInfo(info, uc);
  if (!ai.is_store && !ai.is_load)
    return false;

  SignalContext sig{info, uc};
  HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);

# if defined(__aarch64__)
  uc->uc_mcontext.pc += 4;
# elif defined(__x86_64__)
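  // On x86_64 the #BP trap reports the address of the instruction following
  // INT3, so the PC needs no adjustment here.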
# elif SANITIZER_RISCV64
  // pc points to EBREAK which is 2 bytes long
  uint8_t *exception_source = (uint8_t *)(uc->uc_mcontext.__gregs[REG_PC]);
  uint8_t byte1 = (uint8_t)(*(exception_source + 0));
  uint8_t byte2 = (uint8_t)(*(exception_source + 1));
  uint8_t byte3 = (uint8_t)(*(exception_source + 2));
  uint8_t byte4 = (uint8_t)(*(exception_source + 3));
  uint32_t faulted = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
# if defined(__riscv_compressed)
  isFaultShort = ((faulted & 0x3) != 0x3);
# endif
  uc->uc_mcontext.__gregs[REG_PC] += isFaultShort ? 2 : 4;
# else
# error Unsupported architecture
# endif
  return true;
}

static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

void HwasanOnDeadlySignal(int signo, void *info, void *context) {
  // Probably a tag mismatch.
  if (signo == SIGTRAP)
    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
      return;

  HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}

void Thread::InitStackAndTls(const InitState *) {
  uptr tls_size;
  uptr stack_size;
  GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
                       &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
}

uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
  CHECK(IsAligned(p, kShadowAlignment));
  CHECK(IsAligned(size, kShadowAlignment));
  uptr shadow_start = MemToShadow(p);
  uptr shadow_size = MemToShadowSize(size);

  uptr page_size = GetPageSizeCached();
  uptr page_start = RoundUpTo(shadow_start, page_size);
  uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
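  // [page_start, page_end) is the page-aligned middle of the shadow range; the
  // unaligned head and tail are handled with memset below.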
  uptr threshold = common_flags()->clear_shadow_mmap_threshold;
  if (SANITIZER_LINUX &&
      UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
    internal_memset((void *)shadow_start, tag, page_start - shadow_start);
    internal_memset((void *)page_end, tag,
                    shadow_start + shadow_size - page_end);
    // For an anonymous private mapping MADV_DONTNEED will return a zero page
    // on Linux.
    ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
  } else {
    internal_memset((void *)shadow_start, tag, shadow_size);
  }
  return AddTagToPointer(p, tag);
}

static void BeforeFork() {
  if (CAN_SANITIZE_LEAKS) {
    __lsan::LockGlobal();
  }
  // The `__lsan` locking functions below are defined regardless of
  // `CAN_SANITIZE_LEAKS` and lock the state we need.
  __lsan::LockThreads();
  __lsan::LockAllocator();
  StackDepotLockBeforeFork();
}

static void AfterFork(bool fork_child) {
  StackDepotUnlockAfterFork(fork_child);
  // The `__lsan` unlocking functions below are defined regardless of
  // `CAN_SANITIZE_LEAKS` and unlock the state we need.
  __lsan::UnlockAllocator();
  __lsan::UnlockThreads();
  if (CAN_SANITIZE_LEAKS) {
    __lsan::UnlockGlobal();
  }
}

void HwasanInstallAtForkHandler() {
  pthread_atfork(
      &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
      []() { AfterFork(/* fork_child= */ true); });
}

void InstallAtExitCheckLeaks() {
  if (CAN_SANITIZE_LEAKS) {
    if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
      if (flags()->halt_on_error)
        Atexit(__lsan::DoLeakCheck);
      else
        Atexit(__lsan::DoRecoverableLeakCheckVoid);
    }
  }
}

}  // namespace __hwasan

using namespace __hwasan;

extern "C" void __hwasan_thread_enter() {
  hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
}

extern "C" void __hwasan_thread_exit() {
  Thread *t = GetCurrentThread();
  // Make sure that signal handler can not see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  if (t) {
    // Block async signals on the thread as the handler can be instrumented.
    // After this point instrumented code can't access essential data from TLS
    // and will crash.
    // Bionic already calls __hwasan_thread_exit with blocked signals.
    if (SANITIZER_GLIBC)
      BlockSignals();
    hwasanThreadList().ReleaseThread(t);
  }
}

#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD