//===-- sanitizer_allocator.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"

#include "sanitizer_allocator_checks.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_platform.h"

namespace __sanitizer {

// Default allocator names.
const char *PrimaryAllocatorName = "SizeClassAllocator";
const char *SecondaryAllocatorName = "LargeMmapAllocator";

static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

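// Returns the process-wide internal allocator, constructing it in place in
// internal_alloc_placeholder on first use. Initialization uses double-checked
// locking on internal_allocator_initialized guarded by internal_alloc_init_mu.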
InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) ==
      0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(kReleaseToOSIntervalNever);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

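// Raw wrappers around the internal allocator: when the caller does not supply
// a per-thread cache, fall back to the shared internal_allocator_cache,
// serialized by internal_allocator_cache_mu.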
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  if (alignment == 0) alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size,
                                          alignment);
  }
  return internal_allocator()->Allocate(cache, size, alignment);
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  uptr alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
                                            size, alignment);
  }
  return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
  SetAllocatorOutOfMemory();
  Report("FATAL: %s: internal allocator is out of memory trying to allocate "
         "0x%zx bytes\n",
         SanitizerToolName, requested_size);
  Die();
}

void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
  void *p = RawInternalAlloc(size, cache, alignment);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size);
  return p;
}

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
  void *p = RawInternalRealloc(addr, size, cache);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size);
  return p;
}

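// The array/calloc variants check count * size for overflow up front and
// abort via Die() on overflow instead of returning null.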
void *InternalReallocArray(void *addr, uptr count, uptr size,
                           InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size))) {
    Report(
        "FATAL: %s: reallocarray parameters overflow: count * size (%zd * %zd) "
        "cannot be represented in type size_t\n",
        SanitizerToolName, count, size);
    Die();
  }
  return InternalRealloc(addr, count * size, cache);
}

void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size))) {
    Report("FATAL: %s: calloc parameters overflow: count * size (%zd * %zd) "
           "cannot be represented in type size_t\n",
           SanitizerToolName, count, size);
    Die();
  }
  void *p = InternalAlloc(count * size, cache);
  if (LIKELY(p))
    internal_memset(p, 0, count * size);
  return p;
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  RawInternalFree(addr, cache);
}

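// Acquire the shared cache mutex and then force-lock the allocator itself;
// InternalAllocatorUnlock releases them in the reverse order.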
void InternalAllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  internal_allocator_cache_mu.Lock();
  internal_allocator()->ForceLock();
}

void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  internal_allocator()->ForceUnlock();
  internal_allocator_cache_mu.Unlock();
}

// LowLevelAllocator
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
constexpr uptr kMinNumPagesRounded = 16;
constexpr uptr kMinRoundedSize = 65536;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
static LowLevelAllocateCallback low_level_alloc_callback;

static LowLevelAllocator Alloc;
LowLevelAllocator &GetGlobalLowLevelAllocator() { return Alloc; }

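// Bump-pointer allocation: hand out aligned chunks from the current mapping
// and, when the remaining space is insufficient, mmap a new region sized to
// the request rounded up to a multiple of Min(16 pages, 64K). Memory obtained
// here is never unmapped.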
void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, low_level_alloc_min_alignment);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = RoundUpTo(
        size, Min(GetPageSizeCached() * kMinNumPagesRounded, kMinRoundedSize));
    allocated_current_ = (char *)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_, size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateMinAlignment(uptr alignment) {
  CHECK(IsPowerOfTwo(alignment));
  low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}

// Allocator's OOM and other errors handling support.

static atomic_uint8_t allocator_out_of_memory = {0};
static atomic_uint8_t allocator_may_return_null = {0};

bool IsAllocatorOutOfMemory() {
  return atomic_load_relaxed(&allocator_out_of_memory);
}

void SetAllocatorOutOfMemory() {
  atomic_store_relaxed(&allocator_out_of_memory, 1);
}

bool AllocatorMayReturnNull() {
  return atomic_load(&allocator_may_return_null, memory_order_relaxed);
}

void SetAllocatorMayReturnNull(bool may_return_null) {
  atomic_store(&allocator_may_return_null, may_return_null,
               memory_order_relaxed);
}

void PrintHintAllocatorCannotReturnNull() {
  Report("HINT: if you don't care about these errors you may set "
         "allocator_may_return_null=1\n");
}

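// Flag recording whether the RSS limit has been exceeded; set externally via
// SetRssLimitExceeded() and queried through IsRssLimitExceeded().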
static atomic_uint8_t rss_limit_exceeded;

bool IsRssLimitExceeded() {
  return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
}

void SetRssLimitExceeded(bool limit_exceeded) {
  atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
}

}  // namespace __sanitizer