//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1 GiB

const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/Java/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
inline void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
inline int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

#if SANITIZER_ANDROID
inline uptr GetPageSize() {
  // Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
  return 4096;
}
inline uptr GetPageSizeCached() {
  return 4096;
}
#else
uptr GetPageSize();
extern uptr PageSizeCached;
inline uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
// Threads
tid_t GetTid();
int TgKill(pid_t pid, tid_t tid, int sig);
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition; in
// that case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
    WARN_UNUSED_RESULT;
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
                             const char *name = nullptr) WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition;
// in that case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
                                 const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Maps an aligned chunk of address space; size and alignment are powers of
// two. Dies on all errors except out-of-memory, in which case it returns
// nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate
// inaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);

void MprotectMallocZones(void *addr, int prot);

#if SANITIZER_WINDOWS
// Zero previously mmap'd memory. Currently used only on Windows.
bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) WARN_UNUSED_RESULT;
#endif

#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);
#endif

// Maps shadow_size_bytes of shadow memory and returns the shadow address. It
// will be aligned to the mmap granularity * 2^shadow_scale, or to
// 2^min_shadow_base_alignment if that is larger. The returned address will
// have max(2^min_shadow_base_alignment, mmap granularity) on the left, and
// shadow_size_bytes bytes on the right, which on Linux is mapped with no
// access. high_mem_end may be updated if the original shadow size doesn't fit.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment, uptr &high_mem_end);
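
// For illustration (not part of the interface): with a 4096-byte mmap
// granularity, shadow_scale == 3 and min_shadow_base_alignment == 0, the
// returned shadow base is aligned to 4096 * 2^3 == 32768 bytes.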

// Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
// Reserves 2*S bytes of address space to the right of the returned address and
// ring_buffer_size bytes to the left. The returned address is aligned to 2*S.
// Also creates num_aliases regions of accessible memory starting at offset S
// from the returned address. Each region has size alias_size and is backed by
// the same physical memory.
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size);

// Reserve memory range [beg, end]. If madvise_shadow is true then apply
// madvise (e.g. hugepages, core dumping) requested by options.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
                              bool madvise_shadow = true);

// Protect size bytes of memory starting at addr. Also try to protect
// several pages at the start of the address space as specified by
// zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.
void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
                uptr zero_base_max_shadow_start);

// Finds an available address range.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. A no-op
// if the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(void *ptr, uptr size);
void RunFreeHooks(void *ptr);

class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void *base_;
  uptr size_;
  const char *name_;
  uptr os_handle_;
};
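
// A minimal usage sketch (illustrative only; `kSize` is a hypothetical
// constant): reserve a range, commit part of it, then release it.
//   ReservedAddressRange range;
//   uptr base = range.Init(kSize, "example");
//   range.MapOrDie(base, kSize);
//   range.Unmap(base, kSize);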

typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/ uptr *stats);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array.
void GetMemoryProfile(fill_profile_f cb, uptr *stats);
void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
                            uptr smaps_len);

// Simple low-level (mmap-based) allocator for internal use. It doesn't have
// a constructor, so all instances of LowLevelAllocator should be
// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the minimum alignment of LowLevelAllocator to at least the given value.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Registers a tool-specific callback for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);

// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...) FORMAT(1, 2);
void Report(const char *format, ...) FORMAT(1, 2);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
#define VPrintf(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)
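
// Example (illustrative): VReport(1, "mapped %zu bytes\n", n) prints only when
// the runtime verbosity, typically set via the common "verbosity" flag, is at
// least 1.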

// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
  ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }

  static void Lock() SANITIZER_ACQUIRE(mutex_);
  static void Unlock() SANITIZER_RELEASE(mutex_);
  static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);

 private:
  static atomic_uintptr_t reporting_thread_;
  static StaticSpinMutex mutex_;
};

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/ char *buf, uptr buf_len);
uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

u32 GetUid();
void ReExec();
void CheckASLR();
void CheckMPROTECT();
char **GetArgv();
char **GetEnviron();
void PrintCmdline();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(void *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void WaitForDebugger(unsigned seconds, const char *label);
void SleepForSeconds(unsigned seconds);
void SleepForMillis(unsigned millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);
void NORETURN ReportMunmapFailureAndDie(void *ptr, uptr size, error_t err,
                                        bool raw_report = false);

// Returns true if the platform-specific error reported is an OOM error.
bool ErrorIsOOM(error_t err);

// This reports an error in the form:
//
//   `ERROR: {{SanitizerToolName}}: out of memory: {{err_msg}}`
//
// Downstream tools that read sanitizer output will know that errors starting
// with this format are specifically OOM errors.
#define ERROR_OOM(err_msg, ...) \
  Report("ERROR: %s: out of memory: " err_msg, SanitizerToolName, __VA_ARGS__)
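
// Example (illustrative): ERROR_OOM("requested %zu bytes\n", size) prints
// "ERROR: <tool>: out of memory: requested <size> bytes", where <tool> is the
// current SanitizerToolName.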

// Specific tools may override the behavior of the "Die" function to do a
// tool-specific job.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that will be run when "Die" is
// called. The callbacks are run in reverse order of registration. Tools are
// strongly recommended to set up all callbacks during initialization, when
// there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

void SetCheckUnwindCallback(void (*callback)());

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Signal reporting.
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print a deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing the top stack trace
// frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);

void ReportMmapWriteExec(int prot, int mflags);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
#endif
}
#endif

inline uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

inline uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}

inline constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; }

inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

inline constexpr uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

inline constexpr uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

inline constexpr bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}
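
// Worked example (illustrative): with an 8-byte boundary,
//   RoundUpTo(13, 8) == 16, RoundDownTo(13, 8) == 8, IsAligned(16, 8) == true,
// and RoundUpToPowerOfTwo(13) == 16.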

inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}

// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template <class T>
constexpr T Min(T a, T b) {
  return a < b ? a : b;
}
template <class T>
constexpr T Max(T a, T b) {
  return a > b ? a : b;
}
template <class T>
constexpr T Abs(T a) {
  return a < 0 ? -a : a;
}
template <class T>
void Swap(T &a, T &b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
inline bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
inline bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
inline int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}

// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors.
// WARNING: The current implementation supports only POD types.
template <typename T>
class InternalMmapVectorNoCtor {
 public:
  using value_type = T;
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = 0;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity());
    if (size_ == capacity()) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize the internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};
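
// Example (illustrative): unlike InternalMmapVector below, the NoCtor variant
// must be initialized and destroyed explicitly, e.g. when it is a member of a
// linker-initialized object:
//   InternalMmapVectorNoCtor<uptr> v;
//   v.Initialize(/*initial_capacity=*/16);
//   v.push_back(42);
//   v.Destroy();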

template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}

template <typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};

class InternalScopedString {
 public:
  InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }

  uptr length() const { return buffer_.size() - 1; }
  void clear() {
    buffer_.resize(1);
    buffer_[0] = '\0';
  }
  void append(const char *format, ...) FORMAT(2, 3);
  const char *data() const { return buffer_.data(); }
  char *data() { return buffer_.data(); }

 private:
  InternalMmapVector<char> buffer_;
};

template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};

// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements into the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap the largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}
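
// Example (illustrative): sort a plain array in descending order with a custom
// comparator.
//   uptr a[] = {3, 1, 2};
//   Sort(a, ARRAY_SIZE(a), [](uptr x, uptr y) { return x > y; });
//   // a is now {3, 2, 1}.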

// Works like std::lower_bound: finds the first element that is not less
// than val.
template <class Container, class T,
          class Compare = CompareLess<typename Container::value_type>>
uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {
  uptr first = 0;
  uptr last = v.size();
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
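
// Example (illustrative): on a sorted container holding {10, 20, 30},
// InternalLowerBound(v, 20) returns 1 (the first index i with !(v[i] < 20)),
// and InternalLowerBound(v, 25) returns 2.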

enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64,
  kModuleArchLoongArch64,
  kModuleArchRISCV64,
  kModuleArchHexagon
};

// Sorts and removes duplicates from the container.
template <class Container,
          class Compare = CompareLess<typename Container::value_type>>
void SortAndDedup(Container &v, Compare comp = {}) {
  Sort(v.data(), v.size(), comp);
  uptr size = v.size();
  if (size < 2)
    return;
  uptr last = 0;
  for (uptr i = 1; i < size; ++i) {
    if (comp(v[last], v[i])) {
      ++last;
      if (last != i)
        v[last] = v[i];
    } else {
      CHECK(!comp(v[i], v[last]));
    }
  }
  v.resize(last + 1);
}
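
// Example (illustrative): SortAndDedup turns {3, 1, 3, 2} into {1, 2, 3}.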

constexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if the file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector as it may reread
// the file multiple times to avoid mmap during read attempts. It's used to
// read procmaps, where short reads with mmap in between can produce an
// inconsistent result.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if the file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
                            uptr *pc_offset);

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
    case kModuleArchLoongArch64:
      return "loongarch64";
    case kModuleArchRISCV64:
      return "riscv64";
    case kModuleArchHexagon:
      return "hexagon";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

const uptr kModuleUUIDSize = 32;
const uptr kMaxSegName = 16;

// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_address_(0),
        arch_(kModuleArchUnknown),
        uuid_size_(0),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void setUuid(const char *uuid, uptr size);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_address() const { return max_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  uptr uuid_size() const { return uuid_size_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_address_;
  ModuleArch arch_;
  uptr uuid_size_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};

// List of LoadedModules. The OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses the fallback init if available, otherwise clears.
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};

// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif

#if SANITIZER_APPLE || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_APPLE
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
inline void WriteOneLineToSyslog(const char *s) {}
inline void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
inline void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
inline void AndroidLogWrite(const char *buffer_unused) {}
inline void SanitizerInitializeUnwinder() {}
inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void *(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
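
// Example (illustrative): hide a hand-rolled zeroing loop from the compiler's
// pattern matcher so it isn't replaced with an (intercepted) memset call.
//   for (uptr i = 0; i < size; i++) {
//     p[i] = 0;
//     SanitizerBreakOptimization(p);
//   }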

struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { Unknown, Read, Write } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then. This field allows distinguishing between those cases
  // and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates a signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal, e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if the signal is a stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform-specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};

void InitializePlatformEarly();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
//   auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that crashed the whole machine if a
// process used virtual memory over 4TB (as many sanitizers like to do).
// This function aborts the process if it is running on a kernel that looks
// vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
inline void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for the allocator_release_to_os_interval_ms common flag,
// indicating that the sanitizer allocator should not attempt to release
// memory to the OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking, like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);

// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
inline u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}

template <typename T>
class ArrayRef {
 public:
  ArrayRef() {}
  ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}

  T *begin() { return begin_; }
  T *end() { return end_; }

 private:
  T *begin_ = nullptr;
  T *end_ = nullptr;
};

}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}
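
// Example (illustrative; `Foo` is a hypothetical POD type): placement-allocate
// from a linker-initialized LowLevelAllocator. The caller is responsible for
// locking around Allocate.
//   static __sanitizer::LowLevelAllocator alloc;
//   Foo *foo = new (alloc) Foo();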

#endif  // SANITIZER_COMMON_H