//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;
struct SymbolizedStack;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1 GiB

const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
inline void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
inline int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

#if SANITIZER_ANDROID
inline uptr GetPageSize() {
  // Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
  return 4096;
}
inline uptr GetPageSizeCached() {
  return 4096;
}
#else
uptr GetPageSize();
extern uptr PageSizeCached;
inline uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
// Threads
tid_t GetTid();
int TgKill(pid_t pid, tid_t tid, int sig);
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size, bool raw_report = false);
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition; in
// that case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
    WARN_UNUSED_RESULT;
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
                             const char *name = nullptr) WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition;
// in that case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
                                 const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Maps an aligned chunk of address space; size and alignment are powers of
// two. Dies on all errors except out-of-memory, in which case it returns
// nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate
// inaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);
bool MprotectReadWrite(uptr addr, uptr size);

void MprotectMallocZones(void *addr, int prot);

#if SANITIZER_WINDOWS
// Zero previously mmap'd memory. Currently used only on Windows.
bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) WARN_UNUSED_RESULT;
#endif

#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);
#endif

// Maps shadow_size_bytes of shadow memory and returns the shadow address. It
// will be aligned to the mmap granularity * 2^shadow_scale, or to
// 2^min_shadow_base_alignment if that is larger. The returned address will
// have max(2^min_shadow_base_alignment, mmap granularity) on the left, and
// shadow_size_bytes bytes on the right, which on Linux is mapped with no
// access. The high_mem_end may be updated if the original shadow size doesn't
// fit.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment, uptr &high_mem_end);

// Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
// Reserves 2*S bytes of address space to the right of the returned address and
// ring_buffer_size bytes to the left. The returned address is aligned to 2*S.
// Also creates num_aliases regions of accessible memory starting at offset S
// from the returned address. Each region has size alias_size and is backed by
// the same physical memory.
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size);

// Reserve memory range [beg, end]. If madvise_shadow is true then apply
// madvise (e.g. hugepages, core dumping) requested by options.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
                              bool madvise_shadow = true);

// Protect size bytes of memory starting at addr. Also try to protect
// several pages at the start of the address space as specified by
// zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.
void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
                uptr zero_base_max_shadow_start);

// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. Noop if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(void *ptr, uptr size);
void RunFreeHooks(void *ptr);

class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void *base_;
  uptr size_;
  const char *name_;
  uptr os_handle_;
};
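
// Illustrative usage sketch (kSize and the region name are hypothetical;
// real call sites reserve tool-specific regions):
//   ReservedAddressRange range;
//   uptr base = range.Init(kSize, "example");  // reserve, not yet accessible
//   range.MapOrDie(base, kSize);               // commit (part of) the range
//   ...
//   range.Unmap(base, kSize);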

typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/ uptr *stats);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array.
void GetMemoryProfile(fill_profile_f cb, uptr *stats);
void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
                            uptr smaps_len);

// Simple low-level (mmap-based) allocator for internal use. Doesn't have
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
//
// NOTE: Prefer the singleton provided via `GetGlobalLowLevelAllocator()` over
// creating a new instance. Sharing the singleton's contiguous mmap region
// reduces the number of mmap fragments.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the min alignment of LowLevelAllocator to at least alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Registers a tool-specific callback for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);

LowLevelAllocator &GetGlobalLowLevelAllocator();
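
// Illustrative sketch: objects are typically created through the placement
// operator new declared at the bottom of this file. MyPodType is a
// hypothetical POD type, and the caller must hold its own lock (see the
// "Requires an external lock" note above):
//   MyPodType *p = new (GetGlobalLowLevelAllocator()) MyPodType();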

// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...) FORMAT(1, 2);
void Report(const char *format, ...) FORMAT(1, 2);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
#define VPrintf(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)
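
// Example (illustrative; addr is a hypothetical uptr): with the common
// "verbosity" flag set to 1, the first line prints and the second does not.
//   VPrintf(1, "mapped %p\n", (void *)addr);
//   VReport(2, "extra detail shown only at verbosity >= 2\n");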

// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
  ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }

  static void Lock() SANITIZER_ACQUIRE(mutex_);
  static void Unlock() SANITIZER_RELEASE(mutex_);
  static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);

 private:
  static atomic_uintptr_t reporting_thread_;
  static StaticSpinMutex mutex_;
};
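
// Typical pattern (a sketch): hold the lock for the duration of report
// printing so reports from different threads do not interleave.
//   {
//     ScopedErrorReportLock lock;
//     // ... print the report ...
//   }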

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/ char *buf, uptr buf_len);
uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

u32 GetUid();
void ReExec();
void CheckASLR();
void CheckMPROTECT();
char **GetArgv();
char **GetEnviron();
void PrintCmdline();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(void *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void WaitForDebugger(unsigned seconds, const char *label);
void SleepForSeconds(unsigned seconds);
void SleepForMillis(unsigned millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);
void NORETURN ReportMunmapFailureAndDie(void *ptr, uptr size, error_t err,
                                        bool raw_report = false);

// Returns true if the platform-specific error reported is an OOM error.
bool ErrorIsOOM(error_t err);

// This reports an error in the form:
//
//   `ERROR: {{SanitizerToolName}}: out of memory: {{err_msg}}`
//
// Downstream tools that read sanitizer output will know that errors starting
// in this format are specifically OOM errors.
#define ERROR_OOM(err_msg, ...) \
  Report("ERROR: %s: out of memory: " err_msg, SanitizerToolName, __VA_ARGS__)
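
// Example (illustrative; size is a hypothetical uptr):
//   ERROR_OOM("requested allocation size 0x%zx exceeds maximum\n", size);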

// Specific tools may override behavior of "Die" function to do tool-specific
// job.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that would be run when "Die" is
// called. The callbacks are run in the reverse order of registration. Tools
// are strongly advised to set up all callbacks during initialization, while
// there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

void SetCheckUnwindCallback(void (*callback)());

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Signal reporting.
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);
// Skips frames which we consider internal and not useful to users.
const SymbolizedStack *SkipInternalFrames(const SymbolizedStack *frames);

void ReportMmapWriteExec(int prot, int mflags);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
#endif
}
#endif

inline uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

inline uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}
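
// For example, for x = 0x28 (binary 101000):
//   MostSignificantSetBitIndex(0x28) == 5
//   LeastSignificantSetBitIndex(0x28) == 3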

inline constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; }

inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

inline constexpr uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

inline constexpr uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

inline constexpr bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
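
// Worked examples for the helpers above (boundaries must be powers of two):
//   RoundUpToPowerOfTwo(17) == 32
//   RoundUpTo(17, 8) == 24, RoundDownTo(17, 8) == 16
//   IsAligned(24, 8) == true, Log2(32) == 5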

// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template <class T>
constexpr T Min(T a, T b) {
  return a < b ? a : b;
}
template <class T>
constexpr T Max(T a, T b) {
  return a > b ? a : b;
}
template <class T>
constexpr T Abs(T a) {
  return a < 0 ? -a : a;
}
template <class T>
void Swap(T &a, T &b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
inline bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
inline bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
inline int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}

// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors.
// WARNING: The current implementation supports only POD types.
template <typename T, bool raw_report = false>
class InternalMmapVectorNoCtor {
 public:
  using value_type = T;
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = 0;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_, raw_report); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    if (UNLIKELY(size_ >= capacity())) {
      CHECK_EQ(size_, capacity());
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize the internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  NOINLINE void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data =
        (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector", raw_report);
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_, raw_report);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};

template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}

template <typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
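
// Usage sketch (element type must be POD, per the warning above):
//   InternalMmapVector<uptr> addrs;
//   addrs.push_back(0x1000);
//   addrs.push_back(0x2000);
//   for (uptr a : addrs) Printf("%p\n", (void *)a);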

class InternalScopedString {
 public:
  InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }

  uptr length() const { return buffer_.size() - 1; }
  void clear() {
    buffer_.resize(1);
    buffer_[0] = '\0';
  }
  void Append(const char *str);
  void AppendF(const char *format, ...) FORMAT(2, 3);
  const char *data() const { return buffer_.data(); }
  char *data() { return buffer_.data(); }

 private:
  InternalMmapVector<char> buffer_;
};
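
// Usage sketch (tid is a hypothetical int):
//   InternalScopedString str;
//   str.AppendF("thread T%d", tid);
//   str.Append(" finished");
//   Printf("%s\n", str.data());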

template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};

// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}
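
// Example: sort descending with a custom comparator (a sketch):
//   uptr sizes[] = {3, 1, 2};
//   Sort(sizes, ARRAY_SIZE(sizes), [](uptr a, uptr b) { return a > b; });
//   // sizes is now {3, 2, 1}.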

// Works like std::lower_bound: finds the first element that is not less
// than val.
template <class Container, class T,
          class Compare = CompareLess<typename Container::value_type>>
uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {
  uptr first = 0;
  uptr last = v.size();
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
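
// Example (the container must already be sorted by comp):
//   // v contains {1, 3, 5}
//   InternalLowerBound(v, 3) == 1   // index of the first element >= 3
//   InternalLowerBound(v, 4) == 2
//   InternalLowerBound(v, 9) == 3   // == v.size(), i.e. not found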

enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64,
  kModuleArchLoongArch64,
  kModuleArchRISCV64,
  kModuleArchHexagon
};

// Sorts and removes duplicates from the container.
template <class Container,
          class Compare = CompareLess<typename Container::value_type>>
void SortAndDedup(Container &v, Compare comp = {}) {
  Sort(v.data(), v.size(), comp);
  uptr size = v.size();
  if (size < 2)
    return;
  uptr last = 0;
  for (uptr i = 1; i < size; ++i) {
    if (comp(v[last], v[i])) {
      ++last;
      if (last != i)
        v[last] = v[i];
    } else {
      CHECK(!comp(v[i], v[last]));
    }
  }
  v.resize(last + 1);
}
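
// For example, a vector holding {3, 1, 3, 2} becomes {1, 2, 3} and is resized
// to 3 elements.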

constexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if the file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector, as it may re-read
// the file multiple times to avoid using mmap during the read attempts. It is
// used to read procmaps, where short reads with mmap in between can produce an
// inconsistent result.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if the file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);
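
// Usage sketch (the buffer is mmap-allocated, so it is released with
// UnmapOrDie rather than free):
//   char *buff;
//   uptr buff_size, read_len;
//   if (ReadFileToBuffer("/proc/self/maps", &buff, &buff_size, &read_len)) {
//     // ... use buff[0 .. read_len) ...
//     UnmapOrDie(buff, buff_size);
//   }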

int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
                            uptr *pc_offset);

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
    case kModuleArchLoongArch64:
      return "loongarch64";
    case kModuleArchRISCV64:
      return "riscv64";
    case kModuleArchHexagon:
      return "hexagon";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

#if SANITIZER_APPLE
const uptr kModuleUUIDSize = 16;
#else
const uptr kModuleUUIDSize = 32;
#endif
const uptr kMaxSegName = 16;

// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_address_(0),
        arch_(kModuleArchUnknown),
        uuid_size_(0),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void setUuid(const char *uuid, uptr size);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_address() const { return max_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  uptr uuid_size() const { return uuid_size_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_address_;
  ModuleArch arch_;
  uptr uuid_size_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};

// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears.
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};

// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif

#if SANITIZER_APPLE || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_APPLE
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
inline void WriteOneLineToSyslog(const char *s) {}
inline void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
inline void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
inline void AndroidLogWrite(const char *buffer_unused) {}
inline void SanitizerInitializeUnwinder() {}
inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void *(*func)(void *), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}

struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { Unknown, Read, Write } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then. This field makes it possible to distinguish between
  // those cases and genuine null dereferences.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if signal is stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};

void InitializePlatformEarly();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
//   auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that could crash the whole machine
// if a process used virtual memory over 4TB (as many sanitizers like to do).
// This function will abort the process if running on a kernel that looks
// vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
inline void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for the allocator_release_to_os_interval_ms common flag,
// indicating that the sanitizer allocator should not attempt to release
// memory to the OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);

// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
inline u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}

}  // namespace __sanitizer

inline void *operator new(__sanitizer::usize size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H