//===-- hwasan_report.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Error reporting.
//===----------------------------------------------------------------------===//

#include "hwasan_report.h"

#include <dlfcn.h>

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_globals.h"
#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace_printer.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

using namespace __sanitizer;

namespace __hwasan {

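// Collects the report text while it is being printed, then hands the complete
// message to the registered error report callback and, for fatal reports, to
// SetAbortMessage before optionally dumping the module map and dying.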
class ScopedReport {
 public:
  explicit ScopedReport(bool fatal) : fatal(fatal) {
    Lock lock(&error_message_lock_);
    error_message_ptr_ = &error_message_;
    ++hwasan_report_count;
  }

  ~ScopedReport() {
    void (*report_cb)(const char *);
    {
      Lock lock(&error_message_lock_);
      report_cb = error_report_callback_;
      error_message_ptr_ = nullptr;
    }
    if (report_cb)
      report_cb(error_message_.data());
    if (fatal)
      SetAbortMessage(error_message_.data());
    if (common_flags()->print_module_map >= 2 ||
        (fatal && common_flags()->print_module_map))
      DumpProcessMap();
    if (fatal)
      Die();
  }

  static void MaybeAppendToErrorMessage(const char *msg) {
    Lock lock(&error_message_lock_);
    if (!error_message_ptr_)
      return;
    error_message_ptr_->Append(msg);
  }

  static void SetErrorReportCallback(void (*callback)(const char *)) {
    Lock lock(&error_message_lock_);
    error_report_callback_ = callback;
  }

 private:
  InternalScopedString error_message_;
  bool fatal;

  static Mutex error_message_lock_;
  static InternalScopedString *error_message_ptr_
      SANITIZER_GUARDED_BY(error_message_lock_);
  static void (*error_report_callback_)(const char *);
};

Mutex ScopedReport::error_message_lock_;
InternalScopedString *ScopedReport::error_message_ptr_;
void (*ScopedReport::error_report_callback_)(const char *);

// If there is an active ScopedReport, append to its error message.
void AppendToErrorMessageBuffer(const char *buffer) {
  ScopedReport::MaybeAppendToErrorMessage(buffer);
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

static void MaybePrintAndroidHelpUrl() {
#if SANITIZER_ANDROID
  Printf(
      "Learn more about HWASan reports: "
      "https://source.android.com/docs/security/test/memory-safety/"
      "hwasan-reports\n");
#endif
}

namespace {
// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
 public:
  SavedStackAllocations() = default;

  explicit SavedStackAllocations(Thread *t) { CopyFrom(t); }

  void CopyFrom(Thread *t) {
    StackAllocationsRingBuffer *rb = t->stack_allocations();
    uptr size = rb->size() * sizeof(uptr);
    void *storage =
        MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
    new (&rb_) StackAllocationsRingBuffer(*rb, storage);
    thread_id_ = t->unique_id();
  }

  ~SavedStackAllocations() {
    if (rb_) {
      StackAllocationsRingBuffer *rb = get();
      UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
    }
  }

  const StackAllocationsRingBuffer *get() const {
    return (const StackAllocationsRingBuffer *)&rb_;
  }

  StackAllocationsRingBuffer *get() {
    return (StackAllocationsRingBuffer *)&rb_;
  }

  u32 thread_id() const { return thread_id_; }

 private:
  uptr rb_ = 0;
  u32 thread_id_;
};

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Access() { return Blue(); }
  const char *Allocation() const { return Magenta(); }
  const char *Origin() const { return Magenta(); }
  const char *Name() const { return Green(); }
  const char *Location() { return Green(); }
  const char *Thread() { return Green(); }
};
}  // namespace

static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
                               HeapAllocationRecord *har, uptr *ring_index,
                               uptr *num_matching_addrs,
                               uptr *num_matching_addrs_4b) {
  if (!rb) return false;

  *num_matching_addrs = 0;
  *num_matching_addrs_4b = 0;
  for (uptr i = 0, size = rb->size(); i < size; i++) {
    auto h = (*rb)[i];
    if (h.tagged_addr <= tagged_addr &&
        h.tagged_addr + h.requested_size > tagged_addr) {
      *har = h;
      *ring_index = i;
      return true;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we had only one entry per address (e.g. if the ring buffer data was
    // stored at the address itself). This will help us tune the allocator
    // implementation for MTE.
    if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) &&
        UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) {
      ++*num_matching_addrs;
    }

    // Measure the number of heap ring buffer entries that would have matched
    // if we only had 4 tag bits, which is the case for MTE.
    auto untag_4b = [](uptr p) {
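      // Keep bits 0-59: of the 8-bit tag stored in the top byte, only the
      // low 4 tag bits (bits 56-59) survive, mimicking MTE's 4-bit tags.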
      return p & ((1ULL << 60) - 1);
    };
    if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) &&
        untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) {
      ++*num_matching_addrs_4b;
    }
  }
  return false;
}

static void PrintStackAllocations(const StackAllocationsRingBuffer *sa,
                                  tag_t addr_tag, uptr untagged_addr) {
  uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
  bool found_local = false;
  InternalScopedString location;
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
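    // Each record packs the return PC into the low kRecordFPShift bits and
    // FP bits 4-19 into the remaining high bits; the ring buffer slot's own
    // address carries the frame's base tag.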
    tag_t base_tag =
        reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
    const uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
    CHECK_LT(fp, kRecordFPModulus);
    uptr pc_mask = (1ULL << kRecordFPShift) - 1;
    uptr pc = record & pc_mask;
    FrameInfo frame;
    if (!Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame))
      continue;
    for (LocalInfo &local : frame.locals) {
      if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
        continue;
      if (!(local.name && internal_strlen(local.name)) &&
          !(local.function_name && internal_strlen(local.function_name)) &&
          !(local.decl_file && internal_strlen(local.decl_file)))
        continue;
      tag_t obj_tag = base_tag ^ local.tag_offset;
      if (obj_tag != addr_tag)
        continue;

      // We only store bits 4-19 of FP (bits 0-3 are guaranteed to be zero).
      // So we know only `FP % kRecordFPModulus`, and we can only calculate
      // `local_beg % kRecordFPModulus`.
      // Out of all possible `local_beg` we will only consider 2 candidates
      // nearest to the `untagged_addr`.
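      // E.g. with kRecordFPModulus == 1 MiB and local_beg % 1 MiB == 0x12340,
      // the candidates tried below are the address ending in 0x12340 inside
      // the same 1 MiB block as `untagged_addr` and its neighbor one block
      // away, whichever pair brackets `untagged_addr`.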
      uptr local_beg_mod = (fp + local.frame_offset) % kRecordFPModulus;
      // Pick `local_beg` in the same 1 MiB block as `untagged_addr`.
      uptr local_beg =
          RoundDownTo(untagged_addr, kRecordFPModulus) + local_beg_mod;
      // Pick the largest `local_beg <= untagged_addr`. It's either the current
      // one or the one before.
      if (local_beg > untagged_addr)
        local_beg -= kRecordFPModulus;

      uptr offset = -1ull;
      const char *whence;
      const char *cause = nullptr;
      uptr best_beg;

      // Try the two 1 MiB block options and pick the nearest one.
      for (uptr i = 0; i < 2; ++i, local_beg += kRecordFPModulus) {
        uptr local_end = local_beg + local.size;
        if (local_beg > local_end)
          continue;  // This is a wraparound.
        if (local_beg <= untagged_addr && untagged_addr < local_end) {
          offset = untagged_addr - local_beg;
          whence = "inside";
          cause = "use-after-scope";
          best_beg = local_beg;
          break;  // This is as close as it can be.
        }

        if (untagged_addr >= local_end) {
          uptr new_offset = untagged_addr - local_end;
          if (new_offset < offset) {
            offset = new_offset;
            whence = "after";
            cause = "stack-buffer-overflow";
            best_beg = local_beg;
          }
        } else {
          uptr new_offset = local_beg - untagged_addr;
          if (new_offset < offset) {
            offset = new_offset;
            whence = "before";
            cause = "stack-buffer-overflow";
            best_beg = local_beg;
          }
        }
      }

      // This can fail only if `untagged_addr` is near nullptr, which is
      // impossible with the Linux user-space memory layout.
      if (!cause)
        continue;

      if (!found_local) {
        Printf("\nPotentially referenced stack objects:\n");
        found_local = true;
      }
      Decorator d;
      Printf("%s", d.Error());
      Printf("Cause: %s\n", cause);
      Printf("%s", d.Default());
      Printf("%s", d.Location());
      StackTracePrinter::GetOrInit()->RenderSourceLocation(
          &location, local.decl_file, local.decl_line, /* column= */ 0,
          common_flags()->symbolize_vs_style,
          common_flags()->strip_path_prefix);
      Printf(
          "%p is located %zd bytes %s a %zd-byte local variable %s "
          "[%p,%p) "
          "in %s %s\n",
          untagged_addr, offset, whence, local.size, local.name, best_beg,
          best_beg + local.size, local.function_name, location.data());
      location.clear();
      Printf("%s\n", d.Default());
    }
    frame.Clear();
  }

  if (found_local)
    return;

  // We didn't find any locals. Most likely we don't have symbols, so dump
  // the information that we have for offline analysis.
  InternalScopedString frame_desc;
  Printf("Previously allocated frames:\n");
  for (uptr i = 0; i < frames; i++) {
    const uptr *record_addr = &(*sa)[i];
    uptr record = *record_addr;
    if (!record)
      break;
    uptr pc_mask = (1ULL << 48) - 1;
    uptr pc = record & pc_mask;
    frame_desc.AppendF("  record_addr:0x%zx record:0x%zx",
                       reinterpret_cast<uptr>(record_addr), record);
    SymbolizedStackHolder symbolized_stack(
        Symbolizer::GetOrInit()->SymbolizePC(pc));
    const SymbolizedStack *frame = symbolized_stack.get();
    if (frame) {
      StackTracePrinter::GetOrInit()->RenderFrame(
          &frame_desc, " %F %L", 0, frame->info.address, &frame->info,
          common_flags()->symbolize_vs_style,
          common_flags()->strip_path_prefix);
    }
    Printf("%s\n", frame_desc.data());
    frame_desc.clear();
  }
}

// Returns true if tag == *tag_ptr, reading tags from short granules if
// necessary. This may return a false positive if tags 1-15 are used as a
// regular tag rather than a short granule marker.
static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
  if (tag == *tag_ptr)
    return true;
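  // Shadow values in [1, kShadowAlignment) encode short granule sizes;
  // anything else cannot stash an inline tag in the granule's last byte.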
  if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1)
    return false;
  uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr));
  tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1);
  return tag == inline_tag;
}

// HWASan globals store the size of the global in the descriptor. In cases where
// we don't have a binary with symbols, we can't grab the size of the global
// from the debug info - but we might be able to retrieve it from the
// descriptor. Returns zero if the lookup failed.
static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
  // Find the ELF object that this global resides in.
  Dl_info info;
  if (dladdr(reinterpret_cast<void *>(ptr), &info) == 0)
    return 0;
  auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase);
  auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>(
      reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff);

  // Get the load bias. This is normally the same as the dli_fbase address on
  // position-independent code, but can be different on non-PIE executables,
  // binaries using LLD's partitioning feature, or binaries compiled with a
  // linker script.
  ElfW(Addr) load_bias = 0;
  for (const auto &phdr :
       ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) {
    if (phdr.p_type != PT_LOAD || phdr.p_offset != 0)
      continue;
    load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr;
    break;
  }

  // Walk all globals in this ELF object, looking for the one we're interested
  // in. Once we find it, we can stop iterating and return the size of the
  // global we're interested in.
  for (const hwasan_global &global :
       HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum))
    if (global.addr() <= ptr && ptr < global.addr() + global.size())
      return global.size();

  return 0;
}

void ReportStats() {}

constexpr uptr kDumpWidth = 16;
constexpr uptr kShadowLines = 17;
constexpr uptr kShadowDumpSize = kShadowLines * kDumpWidth;

constexpr uptr kShortLines = 3;
constexpr uptr kShortDumpSize = kShortLines * kDumpWidth;
constexpr uptr kShortDumpOffset = (kShadowLines - kShortLines) / 2 * kDumpWidth;

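// The tag dump is kShadowLines rows of kDumpWidth tags with the faulting
// granule on the center row; short granule tags are shown only for the middle
// kShortLines rows. This returns the shadow address of the dump's first row.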
static uptr GetPrintTagStart(uptr addr) {
  addr = MemToShadow(addr);
  addr = RoundDownTo(addr, kDumpWidth);
  addr -= kDumpWidth * (kShadowLines / 2);
  return addr;
}

template <typename PrintTag>
static void PrintTagInfoAroundAddr(uptr addr, uptr num_rows,
                                   InternalScopedString &s,
                                   PrintTag print_tag) {
  uptr center_row_beg = RoundDownTo(addr, kDumpWidth);
  uptr beg_row = center_row_beg - kDumpWidth * (num_rows / 2);
  uptr end_row = center_row_beg + kDumpWidth * ((num_rows + 1) / 2);
  for (uptr row = beg_row; row < end_row; row += kDumpWidth) {
    s.Append(row == center_row_beg ? "=>" : "  ");
    s.AppendF("%p:", (void *)ShadowToMem(row));
    for (uptr i = 0; i < kDumpWidth; i++) {
      s.Append(row + i == addr ? "[" : " ");
      print_tag(s, row + i);
      s.Append(row + i == addr ? "]" : " ");
    }
    s.Append("\n");
  }
}
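
// A row of the dump looks like (illustrative):
//   =>0x0040a2f0: 1a 1a 1a[1b]1a ...
// where "=>" marks the center row and the brackets mark the tag of the
// faulting granule.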

template <typename GetTag, typename GetShortTag>
static void PrintTagsAroundAddr(uptr addr, GetTag get_tag,
                                GetShortTag get_short_tag) {
  InternalScopedString s;
  addr = MemToShadow(addr);
  s.AppendF(
      "\nMemory tags around the buggy address (one tag corresponds to %zd "
      "bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(addr, kShadowLines, s,
                         [&](InternalScopedString &s, uptr tag_addr) {
                           tag_t tag = get_tag(tag_addr);
                           s.AppendF("%02x", tag);
                         });

  s.AppendF(
      "Tags for short granules around the buggy address (one tag corresponds "
      "to %zd bytes):\n",
      kShadowAlignment);
  PrintTagInfoAroundAddr(addr, kShortLines, s,
                         [&](InternalScopedString &s, uptr tag_addr) {
                           tag_t tag = get_tag(tag_addr);
                           if (tag >= 1 && tag <= kShadowAlignment) {
                             tag_t short_tag = get_short_tag(tag_addr);
                             s.AppendF("%02x", short_tag);
                           } else {
                             s.Append("..");
                           }
                         });
  s.Append(
      "See "
      "https://clang.llvm.org/docs/"
      "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
      "description of short granule tags\n");
  Printf("%s", s.data());
}

static uptr GetTopPc(const StackTrace *stack) {
  return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
                     : 0;
}

namespace {
class BaseReport {
 public:
  BaseReport(StackTrace *stack, bool fatal, uptr tagged_addr, uptr access_size)
      : scoped_report(fatal),
        stack(stack),
        tagged_addr(tagged_addr),
        access_size(access_size),
        untagged_addr(UntagAddr(tagged_addr)),
        ptr_tag(GetTagFromPointer(tagged_addr)),
        mismatch_offset(FindMismatchOffset()),
        heap(CopyHeapChunk()),
        allocations(CopyAllocations()),
        candidate(FindBufferOverflowCandidate()),
        shadow(CopyShadow()) {}

 protected:
  struct OverflowCandidate {
    uptr untagged_addr = 0;
    bool after = false;
    bool is_close = false;

    struct {
      uptr begin = 0;
      uptr end = 0;
      u32 thread_id = 0;
      u32 stack_id = 0;
      bool is_allocated = false;
    } heap;
  };

  struct HeapAllocation {
    HeapAllocationRecord har = {};
    uptr ring_index = 0;
    uptr num_matching_addrs = 0;
    uptr num_matching_addrs_4b = 0;
    u32 free_thread_id = 0;
  };

  struct Allocations {
    ArrayRef<SavedStackAllocations> stack;
    ArrayRef<HeapAllocation> heap;
  };

  struct HeapChunk {
    uptr begin = 0;
    uptr size = 0;
    u32 stack_id = 0;
    bool from_small_heap = false;
    bool is_allocated = false;
  };

  struct Shadow {
    uptr addr = 0;
    tag_t tags[kShadowDumpSize] = {};
    tag_t short_tags[kShortDumpSize] = {};
  };

  sptr FindMismatchOffset() const;
  Shadow CopyShadow() const;
  tag_t GetTagCopy(uptr addr) const;
  tag_t GetShortTagCopy(uptr addr) const;
  HeapChunk CopyHeapChunk() const;
  Allocations CopyAllocations();
  OverflowCandidate FindBufferOverflowCandidate() const;
  void PrintAddressDescription() const;
  void PrintHeapOrGlobalCandidate() const;
  void PrintTags(uptr addr) const;

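  // Fixed-capacity scratch space: saved stack ring buffers for up to 16
  // threads whose stacks contain the address, and up to 256 matching heap
  // allocation records.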
  SavedStackAllocations stack_allocations_storage[16];
  HeapAllocation heap_allocations_storage[256];

  const ScopedReport scoped_report;
  const StackTrace *stack = nullptr;
  const uptr tagged_addr = 0;
  const uptr access_size = 0;
  const uptr untagged_addr = 0;
  const tag_t ptr_tag = 0;
  const sptr mismatch_offset = 0;

  const HeapChunk heap;
  const Allocations allocations;
  const OverflowCandidate candidate;

  const Shadow shadow;
};

sptr BaseReport::FindMismatchOffset() const {
  if (!access_size)
    return 0;
  sptr offset =
      __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
  CHECK_GE(offset, 0);
  CHECK_LT(offset, static_cast<sptr>(access_size));
  tag_t *tag_ptr =
      reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
  tag_t mem_tag = *tag_ptr;

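  // A shadow value in [1, kShadowAlignment) marks a short granule: only the
  // first `mem_tag` bytes of the granule are addressable.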
  if (mem_tag && mem_tag < kShadowAlignment) {
    tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) &
                                                   ~(kShadowAlignment - 1));
    // Even if offset is 0, (untagged_addr + offset) may not be aligned to
    // granules. This is the offset of the leftmost accessed byte within the
    // bad granule.
    u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1);
    tag_t short_tag = granule_ptr[kShadowAlignment - 1];
    // The first mismatch was a short granule that matched the ptr_tag.
    if (short_tag == ptr_tag) {
      // If the access starts after the end of the short granule, then the
      // first bad byte is the first byte of the access; otherwise it is the
      // first byte past the end of the short granule.
      if (mem_tag > in_granule_offset) {
        offset += mem_tag - in_granule_offset;
      }
    }
  }
  return offset;
}

BaseReport::Shadow BaseReport::CopyShadow() const {
  Shadow result;
  if (!MemIsApp(untagged_addr))
    return result;

  result.addr = GetPrintTagStart(untagged_addr + mismatch_offset);
  uptr tag_addr = result.addr;
  uptr short_end = kShortDumpOffset + ARRAY_SIZE(shadow.short_tags);
  for (uptr i = 0; i < ARRAY_SIZE(result.tags); ++i, ++tag_addr) {
    if (!MemIsShadow(tag_addr))
      continue;
    result.tags[i] = *reinterpret_cast<tag_t *>(tag_addr);
    if (i < kShortDumpOffset || i >= short_end)
      continue;
    uptr granule_addr = ShadowToMem(tag_addr);
    if (1 <= result.tags[i] && result.tags[i] <= kShadowAlignment &&
        IsAccessibleMemoryRange(granule_addr, kShadowAlignment)) {
      result.short_tags[i - kShortDumpOffset] =
          *reinterpret_cast<tag_t *>(granule_addr + kShadowAlignment - 1);
    }
  }
  return result;
}

tag_t BaseReport::GetTagCopy(uptr addr) const {
  CHECK_GE(addr, shadow.addr);
  uptr idx = addr - shadow.addr;
  CHECK_LT(idx, ARRAY_SIZE(shadow.tags));
  return shadow.tags[idx];
}

tag_t BaseReport::GetShortTagCopy(uptr addr) const {
  CHECK_GE(addr, shadow.addr + kShortDumpOffset);
  uptr idx = addr - shadow.addr - kShortDumpOffset;
  CHECK_LT(idx, ARRAY_SIZE(shadow.short_tags));
  return shadow.short_tags[idx];
}

BaseReport::HeapChunk BaseReport::CopyHeapChunk() const {
  HeapChunk result = {};
  if (MemIsShadow(untagged_addr))
    return result;
  HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
  result.begin = chunk.Beg();
  if (result.begin) {
    result.size = chunk.ActualSize();
    result.from_small_heap = chunk.FromSmallHeap();
    result.is_allocated = chunk.IsAllocated();
    result.stack_id = chunk.GetAllocStackId();
  }
  return result;
}

BaseReport::Allocations BaseReport::CopyAllocations() {
  if (MemIsShadow(untagged_addr))
    return {};
  uptr stack_allocations_count = 0;
  uptr heap_allocations_count = 0;
  hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
    if (stack_allocations_count < ARRAY_SIZE(stack_allocations_storage) &&
        t->AddrIsInStack(untagged_addr)) {
      stack_allocations_storage[stack_allocations_count++].CopyFrom(t);
    }

    if (heap_allocations_count < ARRAY_SIZE(heap_allocations_storage)) {
      // Scan all threads' ring buffers to find if it's a heap-use-after-free.
      HeapAllocationRecord har;
      uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
      if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
                             &ring_index, &num_matching_addrs,
                             &num_matching_addrs_4b)) {
        auto &ha = heap_allocations_storage[heap_allocations_count++];
        ha.har = har;
        ha.ring_index = ring_index;
        ha.num_matching_addrs = num_matching_addrs;
        ha.num_matching_addrs_4b = num_matching_addrs_4b;
        ha.free_thread_id = t->unique_id();
      }
    }
  });

  return {{stack_allocations_storage, stack_allocations_count},
          {heap_allocations_storage, heap_allocations_count}};
}

BaseReport::OverflowCandidate BaseReport::FindBufferOverflowCandidate() const {
  OverflowCandidate result = {};
  if (MemIsShadow(untagged_addr))
    return result;
  // Check if this looks like a heap buffer overflow by scanning
  // the shadow left and right and looking for the first adjacent
  // object with a different memory tag. If that tag matches ptr_tag,
  // check the allocator if it has a live chunk there.
  tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
  tag_t *candidate_tag_ptr = nullptr, *left = tag_ptr, *right = tag_ptr;
  uptr candidate_distance = 0;
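  // Scan up to 1000 granules (16 KB with 16-byte granules) in each direction.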
  for (; candidate_distance < 1000; candidate_distance++) {
    if (MemIsShadow(reinterpret_cast<uptr>(left)) && TagsEqual(ptr_tag, left)) {
      candidate_tag_ptr = left;
      break;
    }
    --left;
    if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
        TagsEqual(ptr_tag, right)) {
      candidate_tag_ptr = right;
      break;
    }
    ++right;
  }

  constexpr auto kCloseCandidateDistance = 1;
  result.is_close = candidate_distance <= kCloseCandidateDistance;

  result.after = candidate_tag_ptr == left;
  result.untagged_addr = ShadowToMem(reinterpret_cast<uptr>(candidate_tag_ptr));
  HwasanChunkView chunk = FindHeapChunkByAddress(result.untagged_addr);
  if (chunk.IsAllocated()) {
    result.heap.is_allocated = true;
    result.heap.begin = chunk.Beg();
    result.heap.end = chunk.End();
    result.heap.thread_id = chunk.GetAllocThreadId();
    result.heap.stack_id = chunk.GetAllocStackId();
  }
  return result;
}

void BaseReport::PrintHeapOrGlobalCandidate() const {
  Decorator d;
  if (candidate.heap.is_allocated) {
    uptr offset;
    const char *whence;
    if (candidate.heap.begin <= untagged_addr &&
        untagged_addr < candidate.heap.end) {
      offset = untagged_addr - candidate.heap.begin;
      whence = "inside";
    } else if (candidate.after) {
      offset = untagged_addr - candidate.heap.end;
      whence = "after";
    } else {
      offset = candidate.heap.begin - untagged_addr;
      whence = "before";
    }
    Printf("%s", d.Error());
    Printf("\nCause: heap-buffer-overflow\n");
    Printf("%s", d.Default());
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n",
           untagged_addr, offset, whence,
           candidate.heap.end - candidate.heap.begin, candidate.heap.begin,
           candidate.heap.end);
    Printf("%s", d.Allocation());
    Printf("allocated by thread T%u here:\n", candidate.heap.thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(candidate.heap.stack_id).Print();
    return;
  }
  // Check whether the address points into a loaded library. If so, this is
  // most likely a global variable.
  const char *module_name;
  uptr module_address;
  Symbolizer *sym = Symbolizer::GetOrInit();
  if (sym->GetModuleNameAndOffsetForPC(candidate.untagged_addr, &module_name,
                                       &module_address)) {
    Printf("%s", d.Error());
    Printf("\nCause: global-overflow\n");
    Printf("%s", d.Default());
    DataInfo info;
    Printf("%s", d.Location());
    if (sym->SymbolizeData(candidate.untagged_addr, &info) && info.start) {
      Printf(
          "%p is located %zd bytes %s a %zd-byte global variable "
          "%s [%p,%p) in %s\n",
          untagged_addr,
          candidate.after ? untagged_addr - (info.start + info.size)
                          : info.start - untagged_addr,
          candidate.after ? "after" : "before", info.size, info.name,
          info.start, info.start + info.size, module_name);
    } else {
      uptr size = GetGlobalSizeFromDescriptor(candidate.untagged_addr);
      if (size == 0)
        // We couldn't find the size of the global from the descriptors.
        Printf(
            "%p is located %s a global variable in "
            "\n  #0 0x%x (%s+0x%x)\n",
            untagged_addr, candidate.after ? "after" : "before",
            candidate.untagged_addr, module_name, module_address);
      else
        Printf(
            "%p is located %s a %zd-byte global variable in "
            "\n  #0 0x%x (%s+0x%x)\n",
            untagged_addr, candidate.after ? "after" : "before", size,
            candidate.untagged_addr, module_name, module_address);
    }
    Printf("%s", d.Default());
  }
}

void BaseReport::PrintAddressDescription() const {
  Decorator d;
  int num_descriptions_printed = 0;

  if (MemIsShadow(untagged_addr)) {
    Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr,
           d.Default());
    return;
  }

  // Print some very basic information about the address, if it's a heap
  // address.
  if (heap.begin) {
    Printf(
        "%s[%p,%p) is a %s %s heap chunk; "
        "size: %zd offset: %zd\n%s",
        d.Location(), heap.begin, heap.begin + heap.size,
        heap.from_small_heap ? "small" : "large",
        heap.is_allocated ? "allocated" : "unallocated", heap.size,
        untagged_addr - heap.begin, d.Default());
  }

  auto announce_by_id = [](u32 thread_id) {
    hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
      if (thread_id == t->unique_id())
        t->Announce();
    });
  };

  // Check stack first. If the address is on the stack of a live thread, we
  // know it cannot be a heap / global overflow.
  for (const auto &sa : allocations.stack) {
    Printf("%s", d.Error());
    Printf("\nCause: stack tag-mismatch\n");
    Printf("%s", d.Location());
    Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
           sa.thread_id());
    Printf("%s", d.Default());
    announce_by_id(sa.thread_id());
    PrintStackAllocations(sa.get(), ptr_tag, untagged_addr);
    num_descriptions_printed++;
  }

  if (allocations.stack.empty() && candidate.untagged_addr &&
      candidate.is_close) {
    PrintHeapOrGlobalCandidate();
    num_descriptions_printed++;
  }

  for (const auto &ha : allocations.heap) {
    const HeapAllocationRecord har = ha.har;

    Printf("%s", d.Error());
    Printf("\nCause: use-after-free\n");
    Printf("%s", d.Location());
    Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
           untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
           har.requested_size, UntagAddr(har.tagged_addr),
           UntagAddr(har.tagged_addr) + har.requested_size);
    Printf("%s", d.Allocation());
    Printf("freed by thread T%u here:\n", ha.free_thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(har.free_context_id).Print();

    Printf("%s", d.Allocation());
    Printf("previously allocated by thread T%u here:\n", har.alloc_thread_id);
    Printf("%s", d.Default());
    GetStackTraceFromId(har.alloc_context_id).Print();

    // Print a developer note: the index of this heap object
    // in the thread's deallocation ring buffer.
    Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ha.ring_index + 1,
           flags()->heap_history_size);
    Printf("hwasan_dev_note_num_matching_addrs: %zd\n", ha.num_matching_addrs);
    Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
           ha.num_matching_addrs_4b);

    announce_by_id(ha.free_thread_id);
    // TODO: announce_by_id(har.alloc_thread_id);
    num_descriptions_printed++;
  }

  if (candidate.untagged_addr && num_descriptions_printed == 0) {
    PrintHeapOrGlobalCandidate();
    num_descriptions_printed++;
  }

  // Print the remaining threads as extra information, one line per thread.
  if (flags()->print_live_threads_info) {
    Printf("\n");
    hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
  }

  if (!num_descriptions_printed)
    // We exhausted our possibilities. Bail out.
    Printf("HWAddressSanitizer can not describe address in more detail.\n");
  if (num_descriptions_printed > 1) {
    Printf(
        "There are %d potential causes, printed above in order "
        "of likelihood.\n",
        num_descriptions_printed);
  }
}

void BaseReport::PrintTags(uptr addr) const {
  if (shadow.addr) {
    PrintTagsAroundAddr(
        addr, [&](uptr addr) { return GetTagCopy(addr); },
        [&](uptr addr) { return GetShortTagCopy(addr); });
  }
}

class InvalidFreeReport : public BaseReport {
 public:
  InvalidFreeReport(StackTrace *stack, uptr tagged_addr)
      : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0) {}
  ~InvalidFreeReport();

 private:
};

InvalidFreeReport::~InvalidFreeReport() {
  Decorator d;
  Printf("%s", d.Error());
  uptr pc = GetTopPc(stack);
  const char *bug_type = "invalid-free";
  const Thread *thread = GetCurrentThread();
  if (thread) {
    Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n",
           SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id());
  } else {
    Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n",
           SanitizerToolName, bug_type, untagged_addr, pc);
  }
  Printf("%s", d.Access());
  if (shadow.addr) {
    Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag,
           GetTagCopy(MemToShadow(untagged_addr)));
  }
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription();
  PrintTags(untagged_addr);
  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}

class TailOverwrittenReport : public BaseReport {
 public:
  explicit TailOverwrittenReport(StackTrace *stack, uptr tagged_addr,
                                 uptr orig_size, const u8 *expected)
      : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0),
        orig_size(orig_size),
        tail_size(kShadowAlignment - (orig_size % kShadowAlignment)) {
    CHECK_GT(tail_size, 0U);
    CHECK_LT(tail_size, kShadowAlignment);
    internal_memcpy(tail_copy,
                    reinterpret_cast<u8 *>(untagged_addr + orig_size),
                    tail_size);
    internal_memcpy(actual_expected, expected, tail_size);
    // Short granule is stashed in the last byte of the magic string. To avoid
    // confusion, make the expected magic string contain the short granule tag.
    if (orig_size % kShadowAlignment != 0)
      actual_expected[tail_size - 1] = ptr_tag;
  }
  ~TailOverwrittenReport();

 private:
  const uptr orig_size = 0;
  const uptr tail_size = 0;
  u8 actual_expected[kShadowAlignment] = {};
  u8 tail_copy[kShadowAlignment] = {};
};

TailOverwrittenReport::~TailOverwrittenReport() {
  Decorator d;
  Printf("%s", d.Error());
  const char *bug_type = "allocation-tail-overwritten";
  Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
         bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
  Printf("\n%s", d.Default());
  Printf(
      "Stack of invalid access unknown. Issue detected at deallocation "
      "time.\n");
  Printf("%s", d.Allocation());
  Printf("deallocated here:\n");
  Printf("%s", d.Default());
  stack->Print();
  if (heap.begin) {
    Printf("%s", d.Allocation());
    Printf("allocated here:\n");
    Printf("%s", d.Default());
    GetStackTraceFromId(heap.stack_id).Print();
  }

  InternalScopedString s;
  u8 *tail = tail_copy;
  s.Append("Tail contains: ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.Append(".. ");
  for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", tail[i]);
  s.Append("\n");
  s.Append("Expected:      ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.Append(".. ");
  for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", actual_expected[i]);
  s.Append("\n");
  s.Append("               ");
  for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.Append("   ");
  for (uptr i = 0; i < tail_size; i++)
    s.AppendF("%s ", actual_expected[i] != tail[i] ? "^^" : "  ");

  s.AppendF(
      "\nThis error occurs when a buffer overflow overwrites memory\n"
      "after a heap object, but within the %zd-byte granule, e.g.\n"
      "   char *x = new char[20];\n"
      "   x[25] = 42;\n"
      "%s does not detect such bugs in uninstrumented code at the time of "
      "write,"
      "\nbut can detect them at the time of free/delete.\n"
      "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
      kShadowAlignment, SanitizerToolName);
  Printf("%s", s.data());
  GetCurrentThread()->Announce();
  PrintTags(untagged_addr);
  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}

class TagMismatchReport : public BaseReport {
 public:
  explicit TagMismatchReport(StackTrace *stack, uptr tagged_addr,
                             uptr access_size, bool is_store, bool fatal,
                             uptr *registers_frame)
      : BaseReport(stack, fatal, tagged_addr, access_size),
        is_store(is_store),
        registers_frame(registers_frame) {}
  ~TagMismatchReport();

 private:
  const bool is_store;
  const uptr *registers_frame;
};

TagMismatchReport::~TagMismatchReport() {
  Decorator d;
  // TODO: when possible, try to print heap-use-after-free, etc.
  const char *bug_type = "tag-mismatch";
  uptr pc = GetTopPc(stack);
  Printf("%s", d.Error());
  Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
         untagged_addr, pc);

  Thread *t = GetCurrentThread();

  tag_t mem_tag = GetTagCopy(MemToShadow(untagged_addr + mismatch_offset));

  Printf("%s", d.Access());
  if (mem_tag && mem_tag < kShadowAlignment) {
    tag_t short_tag =
        GetShortTagCopy(MemToShadow(untagged_addr + mismatch_offset));
    Printf(
        "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n",
        is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
        mem_tag, short_tag, t->unique_id());
  } else {
    Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
           is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
           mem_tag, t->unique_id());
  }
  if (mismatch_offset)
    Printf("Invalid access starting at offset %zu\n", mismatch_offset);
  Printf("%s", d.Default());

  stack->Print();

  PrintAddressDescription();
  t->Announce();

  PrintTags(untagged_addr + mismatch_offset);

  if (registers_frame)
    ReportRegisters(registers_frame, pc);

  MaybePrintAndroidHelpUrl();
  ReportErrorSummary(bug_type, stack);
}
}  // namespace

void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
  InvalidFreeReport R(stack, tagged_addr);
}

void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
                           const u8 *expected) {
  TailOverwrittenReport R(stack, tagged_addr, orig_size, expected);
}

void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
                       bool is_store, bool fatal, uptr *registers_frame) {
  TagMismatchReport R(stack, tagged_addr, access_size, is_store, fatal,
                      registers_frame);
}

// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_{aarch64,riscv64}.S).
void ReportRegisters(const uptr *frame, uptr pc) {
  Printf("\nRegisters where the failure occurred (pc %p):\n", pc);

  // We explicitly print a single line (4 registers/line) each iteration to
  // reduce the amount of logcat error messages printed. Each Printf() will
  // result in a new logcat line, irrespective of whether a newline is present,
  // and so we wish to reduce the number of Printf() calls we have to make.
#if defined(__aarch64__)
  Printf("    x0  %016llx  x1  %016llx  x2  %016llx  x3  %016llx\n",
         frame[0], frame[1], frame[2], frame[3]);
#elif SANITIZER_RISCV64
  Printf("    sp  %016llx  x1  %016llx  x2  %016llx  x3  %016llx\n",
         reinterpret_cast<const u8 *>(frame) + 256, frame[1], frame[2],
         frame[3]);
#endif
  Printf("    x4  %016llx  x5  %016llx  x6  %016llx  x7  %016llx\n",
         frame[4], frame[5], frame[6], frame[7]);
  Printf("    x8  %016llx  x9  %016llx  x10 %016llx  x11 %016llx\n",
         frame[8], frame[9], frame[10], frame[11]);
  Printf("    x12 %016llx  x13 %016llx  x14 %016llx  x15 %016llx\n",
         frame[12], frame[13], frame[14], frame[15]);
  Printf("    x16 %016llx  x17 %016llx  x18 %016llx  x19 %016llx\n",
         frame[16], frame[17], frame[18], frame[19]);
  Printf("    x20 %016llx  x21 %016llx  x22 %016llx  x23 %016llx\n",
         frame[20], frame[21], frame[22], frame[23]);
  Printf("    x24 %016llx  x25 %016llx  x26 %016llx  x27 %016llx\n",
         frame[24], frame[25], frame[26], frame[27]);
  // hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
  // passes it to this function.
#if defined(__aarch64__)
  Printf("    x28 %016llx  x29 %016llx  x30 %016llx   sp %016llx\n", frame[28],
         frame[29], frame[30], reinterpret_cast<const u8 *>(frame) + 256);
#elif SANITIZER_RISCV64
  Printf("    x28 %016llx  x29 %016llx  x30 %016llx  x31 %016llx\n", frame[28],
         frame[29], frame[30], frame[31]);
#else
#endif
}

}  // namespace __hwasan

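// Example usage (client code; `MyReportLogger` is a hypothetical name): a
// process can capture the full report text before the runtime aborts:
//
//   static void MyReportLogger(const char *report) {
//     // Forward the report text to a crash-logging facility of choice.
//   }
//
//   __hwasan_set_error_report_callback(MyReportLogger);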
void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
  __hwasan::ScopedReport::SetErrorReportCallback(callback);
}
1139 | |