//=-- lsan_allocator.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

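// memset is declared directly with sanitizer-internal types (uptr), presumably
// to avoid pulling libc headers into this file; it is used below to zero
// freshly allocated memory.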
extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1ULL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4ULL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8ULL << 30;
#endif

static Allocator allocator;

static uptr max_malloc_size;

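// Sets up the combined allocator and computes the per-allocation size cap:
// the smaller of max_allocation_size_mb (when the flag is set) and the
// platform's kMaxAllowedMallocSize.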
void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

void AllocatorThreadStart() { allocator.InitCache(GetAllocatorCache()); }

void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
  allocator.DestroyCache(GetAllocatorCache());
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

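// Records leak-tracking metadata (tag, stack trace id, requested size) for a
// new chunk, then flips ChunkMetadata::allocated via a relaxed atomic store
// after the rest of the metadata has been written.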
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
  RunMallocHooks(p, size);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  RunFreeHooks(p);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

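// Handles a request larger than max_malloc_size: returns nullptr when the
// allocator is allowed to return null; otherwise the sanitizer_common
// reporting routine called below terminates the process and does not return.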
static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
  if (AllocatorMayReturnNull()) {
    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
    return nullptr;
  }
  ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
}

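// Central allocation path: enforces the size cap and RSS limit, allocates from
// the per-thread cache, zeroes primary-allocator chunks on request (chunks
// from the secondary allocator come directly from mmap and are already zero),
// and registers the result for leak tracking.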
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > max_malloc_size)
    return ReportAllocationSizeTooBig(size, stack);
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, &stack);
  }
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  return p;
}

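// calloc() implementation: checks nmemb * size for overflow before allocating
// zero-initialized memory.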
static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, &stack);
  }
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

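// Clears the chunk's leak-tracking metadata and returns it to the allocator.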
void Deallocate(void *p) {
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

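// realloc() implementation. The old chunk is deregistered up front; if the
// underlying reallocation fails for a non-zero size, the original chunk (still
// owned by the caller) is registered again so it stays visible to leak
// detection.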
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  if (new_size > max_malloc_size) {
    ReportAllocationSizeTooBig(new_size, stack);
    return nullptr;
  }
  RegisterDeallocation(p);
  void *new_p =
      allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  if (new_p)
    RegisterAllocation(stack, new_p, new_size);
  else if (new_size != 0)
    RegisterAllocation(stack, p, new_size);
  return new_p;
}

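// Returns the address range occupied by the current thread's allocator cache.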
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

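// Maps a pointer to the start of the live chunk containing it, or nullptr if
// it does not point into a live, non-zero-sized allocation.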
static const void *GetMallocBegin(const void *p) {
  if (!p)
    return nullptr;
  void *beg = allocator.GetBlockBegin(p);
  if (!beg)
    return nullptr;
  ChunkMetadata *m = Metadata(beg);
  if (!m)
    return nullptr;
  if (!m->allocated)
    return nullptr;
  if (m->requested_size == 0)
    return nullptr;
  return (const void *)beg;
}

uptr GetMallocUsableSize(const void *p) {
  if (!p)
    return 0;
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

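// Fast variant that assumes p is the beginning of a live chunk; callers are
// expected to check this (see __sanitizer_get_allocated_size_fast below).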
uptr GetMallocUsableSizeFast(const void *p) {
  return Metadata(p)->requested_size;
}

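// posix_memalign(): reports failure through the return value (an errno code)
// rather than by setting errno.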
int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

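// The remaining lsan_* entry points mirror the corresponding libc allocation
// functions: each validates its arguments where needed, sets errno on failure,
// and forwards to Allocate/Reallocate/Deallocate above.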
void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}

void *lsan_reallocarray(void *ptr, uptr nmemb, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, &stack);
  }
  return lsan_realloc(ptr, nmemb * size, stack);
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

void *lsan_pvalloc(uptr size, const StackTrace &stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
}

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

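// Used by the leak scanner: translates an arbitrary address into the user
// chunk it points into, or 0 if it does not point into a live chunk.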
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

uptr GetUserAddr(uptr chunk) {
  return chunk;
}

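// LsanMetadata gives the allocator-agnostic LSan code access to per-chunk
// metadata without exposing ChunkMetadata's layout.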
LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

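// Marks the chunk containing p as ignored so it is not reported as a leak;
// returns a distinct result if the chunk is already ignored or p is not a
// valid live pointer.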
IgnoreObjectResult IgnoreObject(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}

} // namespace __lsan

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

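// LSan does not track free or unmapped byte counts; the next few entry points
// return fixed values to satisfy the common sanitizer allocator interface.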
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 1; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) {
  return GetMallocBegin(p) != nullptr;
}

SANITIZER_INTERFACE_ATTRIBUTE
const void * __sanitizer_get_allocated_begin(const void *p) {
  return GetMallocBegin(p);
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = GetMallocUsableSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }

} // extern "C"