1 | //===-- sanitizer_allocator_test.cpp --------------------------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file is a part of ThreadSanitizer/AddressSanitizer runtime. |
10 | // Tests for sanitizer_allocator.h. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | #include "sanitizer_common/sanitizer_allocator.h" |
14 | |
15 | #include <stdio.h> |
16 | #include <stdlib.h> |
17 | |
18 | #include <algorithm> |
19 | #include <random> |
20 | #include <set> |
21 | #include <vector> |
22 | |
23 | #include "gtest/gtest.h" |
24 | #include "sanitizer_common/sanitizer_allocator_internal.h" |
25 | #include "sanitizer_common/sanitizer_common.h" |
26 | #include "sanitizer_pthread_wrappers.h" |
27 | #include "sanitizer_test_utils.h" |
28 | |
29 | using namespace __sanitizer; |
30 | |
31 | #if defined(__sparcv9) |
// FIXME: These tests probably fail because Solaris/sparcv9 uses the full
// 64-bit address space. The same happens on Linux/sparc64, so this is likely
// a general SPARC issue. Needs more investigation.
35 | # define SKIP_ON_SPARCV9(x) DISABLED_##x |
36 | #else |
37 | # define SKIP_ON_SPARCV9(x) x |
38 | #endif |
39 | |
40 | // On 64-bit systems with small virtual address spaces (e.g. 39-bit) we can't |
41 | // use size class maps with a large number of classes, as that will make the |
42 | // SizeClassAllocator64 region size too small (< 2^32). |
43 | #if SANITIZER_ANDROID && defined(__aarch64__) |
44 | #define ALLOCATOR64_SMALL_SIZE 1 |
45 | #elif SANITIZER_RISCV64 |
46 | #define ALLOCATOR64_SMALL_SIZE 1 |
47 | #else |
48 | #define ALLOCATOR64_SMALL_SIZE 0 |
49 | #endif |
50 | |
51 | // Too slow for debug build |
52 | #if !SANITIZER_DEBUG |
53 | |
54 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
55 | #if SANITIZER_WINDOWS |
56 | // On Windows 64-bit there is no easy way to find a large enough fixed address |
57 | // space that is always available. Thus, a dynamically allocated address space |
58 | // is used instead (i.e. ~(uptr)0). |
59 | static const uptr kAllocatorSpace = ~(uptr)0; |
static const uptr kAllocatorSize = 0x8000000000ULL; // 512G.
61 | static const u64 kAddressSpaceSize = 1ULL << 47; |
62 | typedef DefaultSizeClassMap SizeClassMap; |
63 | #elif SANITIZER_ANDROID && defined(__aarch64__) |
64 | static const uptr kAllocatorSpace = 0x3000000000ULL; |
65 | static const uptr kAllocatorSize = 0x2000000000ULL; |
66 | static const u64 kAddressSpaceSize = 1ULL << 39; |
67 | typedef VeryCompactSizeClassMap SizeClassMap; |
68 | #elif SANITIZER_RISCV64 |
69 | const uptr kAllocatorSpace = ~(uptr)0; |
70 | const uptr kAllocatorSize = 0x2000000000ULL; // 128G. |
71 | static const u64 kAddressSpaceSize = 1ULL << 38; |
72 | typedef VeryDenseSizeClassMap SizeClassMap; |
73 | # elif SANITIZER_APPLE |
74 | static const uptr kAllocatorSpace = 0x700000000000ULL; |
75 | static const uptr kAllocatorSize = 0x010000000000ULL; // 1T. |
76 | static const u64 kAddressSpaceSize = 1ULL << 47; |
77 | typedef DefaultSizeClassMap SizeClassMap; |
78 | # else |
79 | static const uptr kAllocatorSpace = 0x500000000000ULL; |
80 | static const uptr kAllocatorSize = 0x010000000000ULL; // 1T. |
81 | static const u64 kAddressSpaceSize = 1ULL << 47; |
82 | typedef DefaultSizeClassMap SizeClassMap; |
83 | # endif |
84 | |
85 | template <typename AddressSpaceViewTy> |
struct AP64 { // Allocator Params. Short name for shorter demangled names.
87 | static const uptr kSpaceBeg = kAllocatorSpace; |
88 | static const uptr kSpaceSize = kAllocatorSize; |
89 | static const uptr kMetadataSize = 16; |
90 | typedef ::SizeClassMap SizeClassMap; |
91 | typedef NoOpMapUnmapCallback MapUnmapCallback; |
92 | static const uptr kFlags = 0; |
93 | using AddressSpaceView = AddressSpaceViewTy; |
94 | }; |
95 | |
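// Same as AP64, but with kSpaceBeg == ~(uptr)0, i.e. a dynamically allocated
// address space (see the Windows comment above).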
96 | template <typename AddressSpaceViewTy> |
97 | struct AP64Dyn { |
98 | static const uptr kSpaceBeg = ~(uptr)0; |
99 | static const uptr kSpaceSize = kAllocatorSize; |
100 | static const uptr kMetadataSize = 16; |
101 | typedef ::SizeClassMap SizeClassMap; |
102 | typedef NoOpMapUnmapCallback MapUnmapCallback; |
103 | static const uptr kFlags = 0; |
104 | using AddressSpaceView = AddressSpaceViewTy; |
105 | }; |
106 | |
107 | template <typename AddressSpaceViewTy> |
108 | struct AP64Compact { |
109 | static const uptr kSpaceBeg = ~(uptr)0; |
110 | static const uptr kSpaceSize = kAllocatorSize; |
111 | static const uptr kMetadataSize = 16; |
112 | typedef CompactSizeClassMap SizeClassMap; |
113 | typedef NoOpMapUnmapCallback MapUnmapCallback; |
114 | static const uptr kFlags = 0; |
115 | using AddressSpaceView = AddressSpaceViewTy; |
116 | }; |
117 | |
118 | template <typename AddressSpaceViewTy> |
119 | struct AP64VeryCompact { |
120 | static const uptr kSpaceBeg = ~(uptr)0; |
121 | static const uptr kSpaceSize = 1ULL << 37; |
122 | static const uptr kMetadataSize = 16; |
123 | typedef VeryCompactSizeClassMap SizeClassMap; |
124 | typedef NoOpMapUnmapCallback MapUnmapCallback; |
125 | static const uptr kFlags = 0; |
126 | using AddressSpaceView = AddressSpaceViewTy; |
127 | }; |
128 | |
129 | template <typename AddressSpaceViewTy> |
130 | struct AP64Dense { |
131 | static const uptr kSpaceBeg = kAllocatorSpace; |
132 | static const uptr kSpaceSize = kAllocatorSize; |
133 | static const uptr kMetadataSize = 16; |
134 | typedef DenseSizeClassMap SizeClassMap; |
135 | typedef NoOpMapUnmapCallback MapUnmapCallback; |
136 | static const uptr kFlags = 0; |
137 | using AddressSpaceView = AddressSpaceViewTy; |
138 | }; |
139 | |
140 | template <typename AddressSpaceView> |
141 | using Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>; |
142 | using Allocator64 = Allocator64ASVT<LocalAddressSpaceView>; |
143 | |
144 | template <typename AddressSpaceView> |
145 | using Allocator64DynamicASVT = SizeClassAllocator64<AP64Dyn<AddressSpaceView>>; |
146 | using Allocator64Dynamic = Allocator64DynamicASVT<LocalAddressSpaceView>; |
147 | |
148 | template <typename AddressSpaceView> |
149 | using Allocator64CompactASVT = |
150 | SizeClassAllocator64<AP64Compact<AddressSpaceView>>; |
151 | using Allocator64Compact = Allocator64CompactASVT<LocalAddressSpaceView>; |
152 | |
153 | template <typename AddressSpaceView> |
154 | using Allocator64VeryCompactASVT = |
155 | SizeClassAllocator64<AP64VeryCompact<AddressSpaceView>>; |
156 | using Allocator64VeryCompact = |
157 | Allocator64VeryCompactASVT<LocalAddressSpaceView>; |
158 | |
159 | template <typename AddressSpaceView> |
160 | using Allocator64DenseASVT = SizeClassAllocator64<AP64Dense<AddressSpaceView>>; |
161 | using Allocator64Dense = Allocator64DenseASVT<LocalAddressSpaceView>; |
162 | |
163 | #elif defined(__mips64) |
164 | static const u64 kAddressSpaceSize = 1ULL << 40; |
165 | #elif defined(__aarch64__) |
166 | static const u64 kAddressSpaceSize = 1ULL << 39; |
167 | #elif defined(__s390x__) |
168 | static const u64 kAddressSpaceSize = 1ULL << 53; |
169 | #elif defined(__s390__) |
170 | static const u64 kAddressSpaceSize = 1ULL << 31; |
171 | #else |
172 | static const u64 kAddressSpaceSize = 1ULL << 32; |
173 | #endif |
174 | |
175 | static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24); |
176 | |
177 | template <typename AddressSpaceViewTy> |
178 | struct AP32Compact { |
179 | static const uptr kSpaceBeg = 0; |
180 | static const u64 kSpaceSize = kAddressSpaceSize; |
181 | static const uptr kMetadataSize = 16; |
182 | typedef CompactSizeClassMap SizeClassMap; |
183 | static const uptr kRegionSizeLog = ::kRegionSizeLog; |
184 | using AddressSpaceView = AddressSpaceViewTy; |
185 | typedef NoOpMapUnmapCallback MapUnmapCallback; |
186 | static const uptr kFlags = 0; |
187 | }; |
188 | template <typename AddressSpaceView> |
189 | using Allocator32CompactASVT = |
190 | SizeClassAllocator32<AP32Compact<AddressSpaceView>>; |
191 | using Allocator32Compact = Allocator32CompactASVT<LocalAddressSpaceView>; |
192 | |
193 | template <class SizeClassMap> |
194 | void TestSizeClassMap() { |
195 | typedef SizeClassMap SCMap; |
196 | SCMap::Print(); |
197 | SCMap::Validate(); |
198 | } |
199 | |
200 | TEST(SanitizerCommon, DefaultSizeClassMap) { |
201 | TestSizeClassMap<DefaultSizeClassMap>(); |
202 | } |
203 | |
204 | TEST(SanitizerCommon, CompactSizeClassMap) { |
205 | TestSizeClassMap<CompactSizeClassMap>(); |
206 | } |
207 | |
208 | TEST(SanitizerCommon, VeryCompactSizeClassMap) { |
209 | TestSizeClassMap<VeryCompactSizeClassMap>(); |
210 | } |
211 | |
212 | TEST(SanitizerCommon, InternalSizeClassMap) { |
213 | TestSizeClassMap<InternalSizeClassMap>(); |
214 | } |
215 | |
216 | TEST(SanitizerCommon, DenseSizeClassMap) { |
TestSizeClassMap<DenseSizeClassMap>();
218 | } |
219 | |
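// Generic test for a SizeClassAllocator: allocates chunks of various sizes,
// touches and validates them (block begin, ownership, size class, metadata),
// frees everything and checks that total memory usage is stable across
// iterations, then scans the address space to make sure GetBlockBegin() never
// crashes on arbitrary addresses.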
220 | template <class Allocator> |
221 | void TestSizeClassAllocator(uptr premapped_heap = 0) { |
222 | Allocator *a = new Allocator; |
223 | a->Init(kReleaseToOSIntervalNever, premapped_heap); |
224 | typename Allocator::AllocatorCache cache; |
225 | memset(&cache, 0, sizeof(cache)); |
226 | cache.Init(0); |
227 | |
228 | static const uptr sizes[] = { |
229 | 1, 16, 30, 40, 100, 1000, 10000, |
230 | 50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000 |
231 | }; |
232 | |
233 | std::vector<void *> allocated; |
234 | |
235 | uptr last_total_allocated = 0; |
236 | for (int i = 0; i < 3; i++) { |
237 | // Allocate a bunch of chunks. |
238 | for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) { |
239 | uptr size = sizes[s]; |
240 | if (!a->CanAllocate(size, 1)) continue; |
241 | // printf("s = %ld\n", size); |
242 | uptr n_iter = std::max((uptr)6, 4000000 / size); |
243 | // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter); |
244 | for (uptr i = 0; i < n_iter; i++) { |
245 | uptr class_id0 = Allocator::SizeClassMapT::ClassID(size); |
246 | char *x = (char*)cache.Allocate(a, class_id0); |
247 | x[0] = 0; |
248 | x[size - 1] = 0; |
249 | x[size / 2] = 0; |
250 | allocated.push_back(x); |
251 | CHECK_EQ(x, a->GetBlockBegin(x)); |
252 | CHECK_EQ(x, a->GetBlockBegin(x + size - 1)); |
253 | CHECK(a->PointerIsMine(x)); |
254 | CHECK(a->PointerIsMine(x + size - 1)); |
255 | CHECK(a->PointerIsMine(x + size / 2)); |
256 | CHECK_GE(a->GetActuallyAllocatedSize(x), size); |
257 | uptr class_id = a->GetSizeClass(x); |
258 | CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size)); |
259 | uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x)); |
260 | metadata[0] = reinterpret_cast<uptr>(x) + 1; |
261 | metadata[1] = 0xABCD; |
262 | } |
263 | } |
264 | // Deallocate all. |
265 | for (uptr i = 0; i < allocated.size(); i++) { |
266 | void *x = allocated[i]; |
267 | uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x)); |
268 | CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1); |
269 | CHECK_EQ(metadata[1], 0xABCD); |
270 | cache.Deallocate(a, a->GetSizeClass(x), x); |
271 | } |
272 | allocated.clear(); |
273 | uptr total_allocated = a->TotalMemoryUsed(); |
274 | if (last_total_allocated == 0) |
275 | last_total_allocated = total_allocated; |
276 | CHECK_EQ(last_total_allocated, total_allocated); |
277 | } |
278 | |
279 | // Check that GetBlockBegin never crashes. |
280 | for (uptr x = 0, step = kAddressSpaceSize / 100000; |
281 | x < kAddressSpaceSize - step; x += step) |
282 | if (a->PointerIsMine(reinterpret_cast<void *>(x))) |
283 | Ident(a->GetBlockBegin(reinterpret_cast<void *>(x))); |
284 | |
285 | a->TestOnlyUnmap(); |
286 | delete a; |
287 | } |
288 | |
289 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
290 | |
// Allocates a kAllocatorSize-aligned heap region on construction and frees it
// on destruction.
293 | class ScopedPremappedHeap { |
294 | public: |
295 | ScopedPremappedHeap() { |
BasePtr = MmapNoReserveOrDie(2 * kAllocatorSize, "preallocated heap");
AlignedAddr = RoundUpTo(reinterpret_cast<uptr>(BasePtr), kAllocatorSize);
298 | } |
299 | |
~ScopedPremappedHeap() { UnmapOrDie(BasePtr, kAllocatorSize); }
301 | |
302 | uptr Addr() { return AlignedAddr; } |
303 | |
304 | private: |
305 | void *BasePtr; |
306 | uptr AlignedAddr; |
307 | }; |
308 | |
309 | // These tests can fail on Windows if memory is somewhat full and lit happens |
310 | // to run them all at the same time. FIXME: Make them not flaky and reenable. |
311 | #if !SANITIZER_WINDOWS |
312 | TEST(SanitizerCommon, SizeClassAllocator64) { |
313 | TestSizeClassAllocator<Allocator64>(); |
314 | } |
315 | |
316 | TEST(SanitizerCommon, SizeClassAllocator64Dynamic) { |
317 | TestSizeClassAllocator<Allocator64Dynamic>(); |
318 | } |
319 | |
320 | #if !ALLOCATOR64_SMALL_SIZE |
321 | // Android only has 39-bit address space, so mapping 2 * kAllocatorSize |
322 | // sometimes fails. |
323 | TEST(SanitizerCommon, SizeClassAllocator64DynamicPremapped) { |
324 | ScopedPremappedHeap h; |
TestSizeClassAllocator<Allocator64Dynamic>(h.Addr());
326 | } |
327 | |
328 | TEST(SanitizerCommon, SizeClassAllocator64Compact) { |
329 | TestSizeClassAllocator<Allocator64Compact>(); |
330 | } |
331 | |
332 | TEST(SanitizerCommon, SizeClassAllocator64Dense) { |
333 | TestSizeClassAllocator<Allocator64Dense>(); |
334 | } |
335 | #endif |
336 | |
337 | TEST(SanitizerCommon, SizeClassAllocator64VeryCompact) { |
338 | TestSizeClassAllocator<Allocator64VeryCompact>(); |
339 | } |
340 | #endif |
341 | #endif |
342 | |
343 | TEST(SanitizerCommon, SizeClassAllocator32Compact) { |
344 | TestSizeClassAllocator<Allocator32Compact>(); |
345 | } |
346 | |
347 | template <typename AddressSpaceViewTy> |
348 | struct AP32SeparateBatches { |
349 | static const uptr kSpaceBeg = 0; |
350 | static const u64 kSpaceSize = kAddressSpaceSize; |
351 | static const uptr kMetadataSize = 16; |
352 | typedef DefaultSizeClassMap SizeClassMap; |
353 | static const uptr kRegionSizeLog = ::kRegionSizeLog; |
354 | using AddressSpaceView = AddressSpaceViewTy; |
355 | typedef NoOpMapUnmapCallback MapUnmapCallback; |
356 | static const uptr kFlags = |
357 | SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch; |
358 | }; |
359 | template <typename AddressSpaceView> |
360 | using Allocator32SeparateBatchesASVT = |
361 | SizeClassAllocator32<AP32SeparateBatches<AddressSpaceView>>; |
362 | using Allocator32SeparateBatches = |
363 | Allocator32SeparateBatchesASVT<LocalAddressSpaceView>; |
364 | |
365 | TEST(SanitizerCommon, SizeClassAllocator32SeparateBatches) { |
366 | TestSizeClassAllocator<Allocator32SeparateBatches>(); |
367 | } |
368 | |
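// Stress test for GetMetaData(): allocates chunks from every size class,
// repeatedly re-queries their metadata pointers and checks that the result is
// stable, then deallocates everything.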
369 | template <class Allocator> |
370 | void SizeClassAllocatorMetadataStress(uptr premapped_heap = 0) { |
371 | Allocator *a = new Allocator; |
372 | a->Init(kReleaseToOSIntervalNever, premapped_heap); |
373 | typename Allocator::AllocatorCache cache; |
374 | memset(&cache, 0, sizeof(cache)); |
375 | cache.Init(0); |
376 | |
377 | const uptr kNumAllocs = 1 << 13; |
378 | void *allocated[kNumAllocs]; |
379 | void *meta[kNumAllocs]; |
380 | for (uptr i = 0; i < kNumAllocs; i++) { |
381 | void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1)); |
382 | allocated[i] = x; |
383 | meta[i] = a->GetMetaData(x); |
384 | } |
385 | // Get Metadata kNumAllocs^2 times. |
386 | for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) { |
387 | uptr idx = i % kNumAllocs; |
388 | void *m = a->GetMetaData(allocated[idx]); |
389 | EXPECT_EQ(m, meta[idx]); |
390 | } |
391 | for (uptr i = 0; i < kNumAllocs; i++) { |
392 | cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]); |
393 | } |
394 | |
395 | a->TestOnlyUnmap(); |
396 | delete a; |
397 | } |
398 | |
399 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
400 | // These tests can fail on Windows if memory is somewhat full and lit happens |
401 | // to run them all at the same time. FIXME: Make them not flaky and reenable. |
402 | #if !SANITIZER_WINDOWS |
403 | TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) { |
404 | SizeClassAllocatorMetadataStress<Allocator64>(); |
405 | } |
406 | |
407 | TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) { |
408 | SizeClassAllocatorMetadataStress<Allocator64Dynamic>(); |
409 | } |
410 | |
411 | #if !ALLOCATOR64_SMALL_SIZE |
412 | TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedMetadataStress) { |
413 | ScopedPremappedHeap h; |
SizeClassAllocatorMetadataStress<Allocator64Dynamic>(h.Addr());
415 | } |
416 | |
417 | TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) { |
418 | SizeClassAllocatorMetadataStress<Allocator64Compact>(); |
419 | } |
420 | #endif |
421 | |
422 | #endif |
423 | #endif // SANITIZER_CAN_USE_ALLOCATOR64 |
424 | TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) { |
425 | SizeClassAllocatorMetadataStress<Allocator32Compact>(); |
426 | } |
427 | |
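// Allocates roughly TotalSize bytes from the largest size class and checks
// that GetBlockBegin() returns the chunk itself every time, i.e. that the
// computation does not overflow for large offsets.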
428 | template <class Allocator> |
429 | void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize, |
430 | uptr premapped_heap = 0) { |
431 | Allocator *a = new Allocator; |
432 | a->Init(kReleaseToOSIntervalNever, premapped_heap); |
433 | typename Allocator::AllocatorCache cache; |
434 | memset(&cache, 0, sizeof(cache)); |
435 | cache.Init(0); |
436 | |
437 | uptr max_size_class = Allocator::SizeClassMapT::kLargestClassID; |
438 | uptr size = Allocator::SizeClassMapT::Size(max_size_class); |
439 | // Make sure we correctly compute GetBlockBegin() w/o overflow. |
440 | for (size_t i = 0; i <= TotalSize / size; i++) { |
441 | void *x = cache.Allocate(a, max_size_class); |
442 | void *beg = a->GetBlockBegin(x); |
443 | // if ((i & (i - 1)) == 0) |
444 | // fprintf(stderr, "[%zd] %p %p\n", i, x, beg); |
445 | EXPECT_EQ(x, beg); |
446 | } |
447 | |
448 | a->TestOnlyUnmap(); |
449 | delete a; |
450 | } |
451 | |
452 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
453 | // These tests can fail on Windows if memory is somewhat full and lit happens |
454 | // to run them all at the same time. FIXME: Make them not flaky and reenable. |
455 | #if !SANITIZER_WINDOWS |
456 | TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) { |
457 | SizeClassAllocatorGetBlockBeginStress<Allocator64>( |
458 | 1ULL << (SANITIZER_ANDROID ? 31 : 33)); |
459 | } |
460 | TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) { |
461 | SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>( |
1ULL << (SANITIZER_ANDROID ? 31 : 33));
463 | } |
464 | #if !ALLOCATOR64_SMALL_SIZE |
465 | TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedGetBlockBegin) { |
466 | ScopedPremappedHeap h; |
467 | SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>( |
1ULL << (SANITIZER_ANDROID ? 31 : 33), h.Addr());
469 | } |
470 | TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) { |
SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
472 | } |
473 | #endif |
474 | TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) { |
// This configuration does not have > 4GB per size class.
SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);
477 | } |
478 | TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) { |
SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>(1ULL << 33);
480 | } |
481 | #endif |
482 | #endif // SANITIZER_CAN_USE_ALLOCATOR64 |
483 | |
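// Map/unmap callback that only counts invocations, so tests can assert how
// many mappings the allocators perform.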
484 | struct TestMapUnmapCallback { |
485 | static int map_count, map_secondary_count, unmap_count; |
486 | void OnMap(uptr p, uptr size) const { map_count++; } |
487 | void OnMapSecondary(uptr p, uptr size, uptr user_begin, |
488 | uptr user_size) const { |
489 | map_secondary_count++; |
490 | } |
491 | void OnUnmap(uptr p, uptr size) const { unmap_count++; } |
492 | |
493 | static void Reset() { map_count = map_secondary_count = unmap_count = 0; } |
494 | }; |
495 | int TestMapUnmapCallback::map_count; |
496 | int TestMapUnmapCallback::map_secondary_count; |
497 | int TestMapUnmapCallback::unmap_count; |
498 | |
499 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
500 | // These tests can fail on Windows if memory is somewhat full and lit happens |
501 | // to run them all at the same time. FIXME: Make them not flaky and reenable. |
502 | #if !SANITIZER_WINDOWS |
503 | |
504 | template <typename AddressSpaceViewTy = LocalAddressSpaceView> |
505 | struct AP64WithCallback { |
506 | static const uptr kSpaceBeg = kAllocatorSpace; |
507 | static const uptr kSpaceSize = kAllocatorSize; |
508 | static const uptr kMetadataSize = 16; |
509 | typedef ::SizeClassMap SizeClassMap; |
510 | typedef TestMapUnmapCallback MapUnmapCallback; |
511 | static const uptr kFlags = 0; |
512 | using AddressSpaceView = AddressSpaceViewTy; |
513 | }; |
514 | |
515 | TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) { |
516 | TestMapUnmapCallback::Reset(); |
517 | typedef SizeClassAllocator64<AP64WithCallback<>> Allocator64WithCallBack; |
518 | Allocator64WithCallBack *a = new Allocator64WithCallBack; |
a->Init(kReleaseToOSIntervalNever);
520 | EXPECT_EQ(TestMapUnmapCallback::map_count, 1); // Allocator state. |
521 | EXPECT_EQ(TestMapUnmapCallback::map_secondary_count, 0); |
522 | typename Allocator64WithCallBack::AllocatorCache cache; |
523 | memset(&cache, 0, sizeof(cache)); |
cache.Init(0);
525 | AllocatorStats stats; |
526 | stats.Init(); |
527 | const size_t kNumChunks = 128; |
528 | uint32_t chunks[kNumChunks]; |
a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
530 | // State + alloc + metadata + freearray. |
531 | EXPECT_EQ(TestMapUnmapCallback::map_count, 4); |
532 | EXPECT_EQ(TestMapUnmapCallback::map_secondary_count, 0); |
533 | a->TestOnlyUnmap(); |
534 | EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1); // The whole thing. |
535 | delete a; |
536 | } |
537 | #endif |
538 | #endif |
539 | |
540 | template <typename AddressSpaceViewTy = LocalAddressSpaceView> |
541 | struct AP32WithCallback { |
542 | static const uptr kSpaceBeg = 0; |
543 | static const u64 kSpaceSize = kAddressSpaceSize; |
544 | static const uptr kMetadataSize = 16; |
545 | typedef CompactSizeClassMap SizeClassMap; |
546 | static const uptr kRegionSizeLog = ::kRegionSizeLog; |
547 | using AddressSpaceView = AddressSpaceViewTy; |
548 | typedef TestMapUnmapCallback MapUnmapCallback; |
549 | static const uptr kFlags = 0; |
550 | }; |
551 | |
552 | TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) { |
553 | TestMapUnmapCallback::Reset(); |
554 | typedef SizeClassAllocator32<AP32WithCallback<>> Allocator32WithCallBack; |
555 | Allocator32WithCallBack *a = new Allocator32WithCallBack; |
a->Init(kReleaseToOSIntervalNever);
557 | EXPECT_EQ(TestMapUnmapCallback::map_count, 0); |
558 | EXPECT_EQ(TestMapUnmapCallback::map_secondary_count, 0); |
559 | Allocator32WithCallBack::AllocatorCache cache; |
560 | memset(&cache, 0, sizeof(cache)); |
cache.Init(0);
562 | AllocatorStats stats; |
563 | stats.Init(); |
a->AllocateBatch(&stats, &cache, 32);
565 | EXPECT_EQ(TestMapUnmapCallback::map_count, 1); |
566 | EXPECT_EQ(TestMapUnmapCallback::map_secondary_count, 0); |
567 | a->TestOnlyUnmap(); |
568 | EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1); |
569 | delete a; |
570 | } |
571 | |
572 | TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) { |
573 | TestMapUnmapCallback::Reset(); |
574 | LargeMmapAllocator<TestMapUnmapCallback> a; |
575 | a.Init(); |
576 | AllocatorStats stats; |
577 | stats.Init(); |
void *x = a.Allocate(&stats, 1 << 20, 1);
579 | EXPECT_EQ(TestMapUnmapCallback::map_count, 0); |
580 | EXPECT_EQ(TestMapUnmapCallback::map_secondary_count, 1); |
a.Deallocate(&stats, x);
582 | EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1); |
583 | } |
584 | |
585 | // Don't test OOM conditions on Win64 because it causes other tests on the same |
586 | // machine to OOM. |
587 | #if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 |
588 | TEST(SanitizerCommon, SizeClassAllocator64Overflow) { |
589 | Allocator64 a; |
a.Init(kReleaseToOSIntervalNever);
591 | Allocator64::AllocatorCache cache; |
592 | memset(&cache, 0, sizeof(cache)); |
cache.Init(0);
594 | AllocatorStats stats; |
595 | stats.Init(); |
596 | |
597 | const size_t kNumChunks = 128; |
598 | uint32_t chunks[kNumChunks]; |
599 | bool allocation_failed = false; |
600 | for (int i = 0; i < 1000000; i++) { |
601 | uptr class_id = a.kNumClasses - 1; |
if (!a.GetFromAllocator(&stats, class_id, chunks, kNumChunks)) {
603 | allocation_failed = true; |
604 | break; |
605 | } |
606 | } |
607 | EXPECT_EQ(allocation_failed, true); |
608 | |
609 | a.TestOnlyUnmap(); |
610 | } |
611 | #endif |
612 | |
613 | TEST(SanitizerCommon, LargeMmapAllocator) { |
614 | LargeMmapAllocator<NoOpMapUnmapCallback> a; |
615 | a.Init(); |
616 | AllocatorStats stats; |
617 | stats.Init(); |
618 | |
619 | static const int kNumAllocs = 1000; |
620 | char *allocated[kNumAllocs]; |
621 | static const uptr size = 4000; |
622 | // Allocate some. |
623 | for (int i = 0; i < kNumAllocs; i++) { |
allocated[i] = (char *)a.Allocate(&stats, size, 1);
625 | CHECK(a.PointerIsMine(allocated[i])); |
626 | } |
627 | // Deallocate all. |
628 | CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs); |
629 | for (int i = 0; i < kNumAllocs; i++) { |
630 | char *p = allocated[i]; |
631 | CHECK(a.PointerIsMine(p)); |
a.Deallocate(&stats, p);
633 | } |
// Check that none are left.
635 | CHECK_EQ(a.TotalMemoryUsed(), 0); |
636 | |
637 | // Allocate some more, also add metadata. |
638 | for (int i = 0; i < kNumAllocs; i++) { |
char *x = (char *)a.Allocate(&stats, size, 1);
CHECK_GE(a.GetActuallyAllocatedSize(x), size);
uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
642 | *meta = i; |
643 | allocated[i] = x; |
644 | } |
645 | for (int i = 0; i < kNumAllocs * kNumAllocs; i++) { |
646 | char *p = allocated[i % kNumAllocs]; |
647 | CHECK(a.PointerIsMine(p)); |
648 | CHECK(a.PointerIsMine(p + 2000)); |
649 | } |
650 | CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs); |
651 | // Deallocate all in reverse order. |
652 | for (int i = 0; i < kNumAllocs; i++) { |
653 | int idx = kNumAllocs - i - 1; |
654 | char *p = allocated[idx]; |
655 | uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p)); |
656 | CHECK_EQ(*meta, idx); |
657 | CHECK(a.PointerIsMine(p)); |
a.Deallocate(&stats, p);
659 | } |
660 | CHECK_EQ(a.TotalMemoryUsed(), 0); |
661 | |
// Test alignments. Test with 256MB alignment on x64 non-Windows machines.
// Windows doesn't overcommit, and many machines do not have 25.6GB of swap.
664 | uptr max_alignment = |
665 | (SANITIZER_WORDSIZE == 64 && !SANITIZER_WINDOWS) ? (1 << 28) : (1 << 24); |
666 | for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) { |
667 | const uptr kNumAlignedAllocs = 100; |
668 | for (uptr i = 0; i < kNumAlignedAllocs; i++) { |
669 | uptr size = ((i % 10) + 1) * 4096; |
char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
671 | CHECK_EQ(p, a.GetBlockBegin(p)); |
672 | CHECK_EQ(p, a.GetBlockBegin(p + size - 1)); |
673 | CHECK_EQ(p, a.GetBlockBegin(p + size / 2)); |
674 | CHECK_EQ(0, (uptr)allocated[i] % alignment); |
675 | p[0] = p[size - 1] = 0; |
676 | } |
677 | for (uptr i = 0; i < kNumAlignedAllocs; i++) { |
a.Deallocate(&stats, allocated[i]);
679 | } |
680 | } |
681 | |
682 | // Regression test for boundary condition in GetBlockBegin(). |
683 | uptr page_size = GetPageSizeCached(); |
char *p = (char *)a.Allocate(&stats, page_size, 1);
685 | CHECK_EQ(p, a.GetBlockBegin(p)); |
686 | CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1)); |
687 | CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size)); |
a.Deallocate(&stats, p);
689 | } |
690 | |
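// Exercises the combined (primary + secondary) allocator: checks that
// impossible sizes fail cleanly, then repeatedly allocates chunks with
// metadata, verifies ForEachChunk() reports every live chunk, and frees
// everything again.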
691 | template <class PrimaryAllocator> |
692 | void TestCombinedAllocator(uptr premapped_heap = 0) { |
693 | typedef CombinedAllocator<PrimaryAllocator> Allocator; |
694 | Allocator *a = new Allocator; |
695 | a->Init(kReleaseToOSIntervalNever, premapped_heap); |
696 | std::mt19937 r; |
697 | |
698 | typename Allocator::AllocatorCache cache; |
699 | memset(&cache, 0, sizeof(cache)); |
700 | a->InitCache(&cache); |
701 | |
702 | EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0); |
703 | EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0); |
704 | EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0); |
705 | EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0); |
706 | EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0); |
707 | EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0); |
708 | |
709 | const uptr kNumAllocs = 100000; |
710 | const uptr kNumIter = 10; |
711 | for (uptr iter = 0; iter < kNumIter; iter++) { |
712 | std::vector<void*> allocated; |
713 | for (uptr i = 0; i < kNumAllocs; i++) { |
714 | uptr size = (i % (1 << 14)) + 1; |
715 | if ((i % 1024) == 0) |
716 | size = 1 << (10 + (i % 14)); |
717 | void *x = a->Allocate(&cache, size, 1); |
718 | uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x)); |
719 | CHECK_EQ(*meta, 0); |
720 | *meta = size; |
721 | allocated.push_back(x); |
722 | } |
723 | |
724 | std::shuffle(allocated.begin(), allocated.end(), r); |
725 | |
726 | // Test ForEachChunk(...) |
727 | { |
728 | std::set<void *> reported_chunks; |
729 | auto cb = [](uptr chunk, void *arg) { |
730 | auto reported_chunks_ptr = reinterpret_cast<std::set<void *> *>(arg); |
731 | auto pair = |
732 | reported_chunks_ptr->insert(reinterpret_cast<void *>(chunk)); |
733 | // Check chunk is never reported more than once. |
734 | ASSERT_TRUE(pair.second); |
735 | }; |
736 | a->ForEachChunk(cb, reinterpret_cast<void *>(&reported_chunks)); |
737 | for (const auto &allocated_ptr : allocated) { |
738 | ASSERT_NE(reported_chunks.find(allocated_ptr), reported_chunks.end()); |
739 | } |
740 | } |
741 | |
742 | for (uptr i = 0; i < kNumAllocs; i++) { |
743 | void *x = allocated[i]; |
744 | uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x)); |
745 | CHECK_NE(*meta, 0); |
746 | CHECK(a->PointerIsMine(x)); |
747 | *meta = 0; |
748 | a->Deallocate(&cache, x); |
749 | } |
750 | allocated.clear(); |
751 | a->SwallowCache(&cache); |
752 | } |
753 | a->DestroyCache(&cache); |
754 | a->TestOnlyUnmap(); |
755 | } |
756 | |
757 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
758 | TEST(SanitizerCommon, CombinedAllocator64) { |
759 | TestCombinedAllocator<Allocator64>(); |
760 | } |
761 | |
762 | TEST(SanitizerCommon, CombinedAllocator64Dynamic) { |
763 | TestCombinedAllocator<Allocator64Dynamic>(); |
764 | } |
765 | |
766 | #if !ALLOCATOR64_SMALL_SIZE |
767 | #if !SANITIZER_WINDOWS |
768 | // Windows fails to map 1TB, so disable this test. |
769 | TEST(SanitizerCommon, CombinedAllocator64DynamicPremapped) { |
770 | ScopedPremappedHeap h; |
TestCombinedAllocator<Allocator64Dynamic>(h.Addr());
772 | } |
773 | #endif |
774 | |
775 | TEST(SanitizerCommon, CombinedAllocator64Compact) { |
776 | TestCombinedAllocator<Allocator64Compact>(); |
777 | } |
778 | #endif |
779 | |
780 | TEST(SanitizerCommon, CombinedAllocator64VeryCompact) { |
781 | TestCombinedAllocator<Allocator64VeryCompact>(); |
782 | } |
783 | #endif |
784 | |
785 | TEST(SanitizerCommon, SKIP_ON_SPARCV9(CombinedAllocator32Compact)) { |
786 | TestCombinedAllocator<Allocator32Compact>(); |
787 | } |
788 | |
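// Checks that a per-thread cache returns all of its chunks to the allocator
// on Drain(), so the allocator's total memory usage stays constant across
// repeated allocate/deallocate/drain cycles.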
789 | template <class Allocator> |
790 | void TestSizeClassAllocatorLocalCache(uptr premapped_heap = 0) { |
791 | using AllocatorCache = typename Allocator::AllocatorCache; |
792 | AllocatorCache cache; |
793 | Allocator *a = new Allocator(); |
794 | |
795 | a->Init(kReleaseToOSIntervalNever, premapped_heap); |
796 | memset(&cache, 0, sizeof(cache)); |
797 | cache.Init(0); |
798 | |
799 | const uptr kNumAllocs = 10000; |
800 | const int kNumIter = 100; |
801 | uptr saved_total = 0; |
802 | for (int class_id = 1; class_id <= 5; class_id++) { |
803 | for (int it = 0; it < kNumIter; it++) { |
804 | void *allocated[kNumAllocs]; |
805 | for (uptr i = 0; i < kNumAllocs; i++) { |
806 | allocated[i] = cache.Allocate(a, class_id); |
807 | } |
808 | for (uptr i = 0; i < kNumAllocs; i++) { |
809 | cache.Deallocate(a, class_id, allocated[i]); |
810 | } |
811 | cache.Drain(a); |
812 | uptr total_allocated = a->TotalMemoryUsed(); |
813 | if (it) |
814 | CHECK_EQ(saved_total, total_allocated); |
815 | saved_total = total_allocated; |
816 | } |
817 | } |
818 | |
819 | a->TestOnlyUnmap(); |
820 | delete a; |
821 | } |
822 | |
823 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
824 | // These tests can fail on Windows if memory is somewhat full and lit happens |
825 | // to run them all at the same time. FIXME: Make them not flaky and reenable. |
826 | #if !SANITIZER_WINDOWS |
827 | TEST(SanitizerCommon, SizeClassAllocator64LocalCache) { |
828 | TestSizeClassAllocatorLocalCache<Allocator64>(); |
829 | } |
830 | |
831 | TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) { |
832 | TestSizeClassAllocatorLocalCache<Allocator64Dynamic>(); |
833 | } |
834 | |
835 | #if !ALLOCATOR64_SMALL_SIZE |
836 | TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedLocalCache) { |
837 | ScopedPremappedHeap h; |
TestSizeClassAllocatorLocalCache<Allocator64Dynamic>(h.Addr());
839 | } |
840 | |
841 | TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) { |
842 | TestSizeClassAllocatorLocalCache<Allocator64Compact>(); |
843 | } |
844 | #endif |
845 | TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) { |
846 | TestSizeClassAllocatorLocalCache<Allocator64VeryCompact>(); |
847 | } |
848 | #endif |
849 | #endif |
850 | |
851 | TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) { |
852 | TestSizeClassAllocatorLocalCache<Allocator32Compact>(); |
853 | } |
854 | |
855 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
856 | typedef Allocator64::AllocatorCache AllocatorCache; |
857 | static AllocatorCache static_allocator_cache; |
858 | |
859 | void *AllocatorLeakTestWorker(void *arg) { |
860 | typedef AllocatorCache::Allocator Allocator; |
861 | Allocator *a = (Allocator*)(arg); |
static_allocator_cache.Allocate(a, 10);
static_allocator_cache.Drain(a);
864 | return 0; |
865 | } |
866 | |
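// Spawns many short-lived threads that each allocate from a static cache and
// then drain it; the allocator's total memory usage must not keep growing.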
867 | TEST(SanitizerCommon, AllocatorLeakTest) { |
868 | typedef AllocatorCache::Allocator Allocator; |
869 | Allocator a; |
a.Init(kReleaseToOSIntervalNever);
871 | uptr total_used_memory = 0; |
872 | for (int i = 0; i < 100; i++) { |
873 | pthread_t t; |
874 | PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a); |
875 | PTHREAD_JOIN(t, 0); |
876 | if (i == 0) |
877 | total_used_memory = a.TotalMemoryUsed(); |
878 | EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory); |
879 | } |
880 | |
881 | a.TestOnlyUnmap(); |
882 | } |
883 | |
884 | // Struct which is allocated to pass info to new threads. The new thread frees |
885 | // it. |
886 | struct NewThreadParams { |
887 | AllocatorCache *thread_cache; |
888 | AllocatorCache::Allocator *allocator; |
889 | uptr class_id; |
890 | }; |
891 | |
892 | // Called in a new thread. Just frees its argument. |
893 | static void *DeallocNewThreadWorker(void *arg) { |
894 | NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg); |
params->thread_cache->Deallocate(params->allocator, params->class_id, params);
896 | return NULL; |
897 | } |
898 | |
899 | // The allocator cache is supposed to be POD and zero initialized. We should be |
900 | // able to call Deallocate on a zeroed cache, and it will self-initialize. |
901 | TEST(Allocator, AllocatorCacheDeallocNewThread) { |
902 | AllocatorCache::Allocator allocator; |
allocator.Init(kReleaseToOSIntervalNever);
904 | AllocatorCache main_cache; |
905 | AllocatorCache child_cache; |
906 | memset(&main_cache, 0, sizeof(main_cache)); |
907 | memset(&child_cache, 0, sizeof(child_cache)); |
908 | |
uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
main_cache.Allocate(&allocator, class_id));
912 | params->thread_cache = &child_cache; |
913 | params->allocator = &allocator; |
914 | params->class_id = class_id; |
915 | pthread_t t; |
916 | PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params); |
917 | PTHREAD_JOIN(t, 0); |
918 | |
919 | allocator.TestOnlyUnmap(); |
920 | } |
921 | #endif |
922 | |
923 | TEST(Allocator, Basic) { |
char *p = (char*)InternalAlloc(10);
EXPECT_NE(p, (char*)0);
char *p2 = (char*)InternalAlloc(20);
EXPECT_NE(p2, (char*)0);
EXPECT_NE(p2, p);
InternalFree(p);
InternalFree(p2);
931 | } |
932 | |
933 | TEST(Allocator, Stress) { |
934 | const int kCount = 1000; |
935 | char *ptrs[kCount]; |
936 | unsigned rnd = 42; |
937 | for (int i = 0; i < kCount; i++) { |
uptr sz = my_rand_r(&rnd) % 1000;
char *p = (char*)InternalAlloc(sz);
940 | EXPECT_NE(p, (char*)0); |
941 | ptrs[i] = p; |
942 | } |
943 | for (int i = 0; i < kCount; i++) { |
InternalFree(ptrs[i]);
945 | } |
946 | } |
947 | |
948 | TEST(Allocator, LargeAlloc) { |
void *p = InternalAlloc(10 << 20);
950 | InternalFree(p); |
951 | } |
952 | |
953 | TEST(Allocator, ScopedBuffer) { |
954 | const int kSize = 512; |
955 | { |
956 | InternalMmapVector<int> int_buf(kSize); |
957 | EXPECT_EQ((uptr)kSize, int_buf.size()); |
958 | } |
959 | InternalMmapVector<char> char_buf(kSize); |
960 | EXPECT_EQ((uptr)kSize, char_buf.size()); |
internal_memset(char_buf.data(), 'c', kSize);
962 | for (int i = 0; i < kSize; i++) { |
963 | EXPECT_EQ('c', char_buf[i]); |
964 | } |
965 | } |
966 | |
967 | void IterationTestCallback(uptr chunk, void *arg) { |
968 | reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk); |
969 | } |
970 | |
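// Allocates a bunch of chunks, then iterates over all chunks with
// ForEachChunk() under ForceLock() and verifies that every allocated pointer
// is reported.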
971 | template <class Allocator> |
972 | void TestSizeClassAllocatorIteration(uptr premapped_heap = 0) { |
973 | Allocator *a = new Allocator; |
974 | a->Init(kReleaseToOSIntervalNever, premapped_heap); |
975 | typename Allocator::AllocatorCache cache; |
976 | memset(&cache, 0, sizeof(cache)); |
977 | cache.Init(0); |
978 | |
979 | static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000, |
980 | 50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000}; |
981 | |
982 | std::vector<void *> allocated; |
983 | |
984 | // Allocate a bunch of chunks. |
985 | for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) { |
986 | uptr size = sizes[s]; |
987 | if (!a->CanAllocate(size, 1)) continue; |
988 | // printf("s = %ld\n", size); |
989 | uptr n_iter = std::max((uptr)6, 80000 / size); |
990 | // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter); |
991 | for (uptr j = 0; j < n_iter; j++) { |
992 | uptr class_id0 = Allocator::SizeClassMapT::ClassID(size); |
993 | void *x = cache.Allocate(a, class_id0); |
994 | allocated.push_back(x); |
995 | } |
996 | } |
997 | |
998 | std::set<uptr> reported_chunks; |
999 | a->ForceLock(); |
1000 | a->ForEachChunk(IterationTestCallback, &reported_chunks); |
1001 | a->ForceUnlock(); |
1002 | |
1003 | for (uptr i = 0; i < allocated.size(); i++) { |
1004 | // Don't use EXPECT_NE. Reporting the first mismatch is enough. |
1005 | ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])), |
1006 | reported_chunks.end()); |
1007 | } |
1008 | |
1009 | a->TestOnlyUnmap(); |
1010 | delete a; |
1011 | } |
1012 | |
1013 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
1014 | // These tests can fail on Windows if memory is somewhat full and lit happens |
1015 | // to run them all at the same time. FIXME: Make them not flaky and reenable. |
1016 | #if !SANITIZER_WINDOWS |
1017 | TEST(SanitizerCommon, SizeClassAllocator64Iteration) { |
1018 | TestSizeClassAllocatorIteration<Allocator64>(); |
1019 | } |
1020 | TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) { |
1021 | TestSizeClassAllocatorIteration<Allocator64Dynamic>(); |
1022 | } |
1023 | #if !ALLOCATOR64_SMALL_SIZE |
1024 | TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedIteration) { |
1025 | ScopedPremappedHeap h; |
TestSizeClassAllocatorIteration<Allocator64Dynamic>(h.Addr());
1027 | } |
1028 | #endif |
1029 | #endif |
1030 | #endif |
1031 | |
1032 | TEST(SanitizerCommon, SKIP_ON_SPARCV9(SizeClassAllocator32Iteration)) { |
1033 | TestSizeClassAllocatorIteration<Allocator32Compact>(); |
1034 | } |
1035 | |
1036 | TEST(SanitizerCommon, LargeMmapAllocatorIteration) { |
1037 | LargeMmapAllocator<NoOpMapUnmapCallback> a; |
1038 | a.Init(); |
1039 | AllocatorStats stats; |
1040 | stats.Init(); |
1041 | |
1042 | static const uptr kNumAllocs = 1000; |
1043 | char *allocated[kNumAllocs]; |
1044 | static const uptr size = 40; |
1045 | // Allocate some. |
1046 | for (uptr i = 0; i < kNumAllocs; i++) |
allocated[i] = (char *)a.Allocate(&stats, size, 1);
1048 | |
1049 | std::set<uptr> reported_chunks; |
1050 | a.ForceLock(); |
1051 | a.ForEachChunk(IterationTestCallback, &reported_chunks); |
1052 | a.ForceUnlock(); |
1053 | |
1054 | for (uptr i = 0; i < kNumAllocs; i++) { |
1055 | // Don't use EXPECT_NE. Reporting the first mismatch is enough. |
1056 | ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])), |
1057 | reported_chunks.end()); |
1058 | } |
1059 | for (uptr i = 0; i < kNumAllocs; i++) |
a.Deallocate(&stats, allocated[i]);
1061 | } |
1062 | |
1063 | TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) { |
1064 | LargeMmapAllocator<NoOpMapUnmapCallback> a; |
1065 | a.Init(); |
1066 | AllocatorStats stats; |
1067 | stats.Init(); |
1068 | |
1069 | static const uptr kNumAllocs = 1024; |
1070 | static const uptr kNumExpectedFalseLookups = 10000000; |
1071 | char *allocated[kNumAllocs]; |
1072 | static const uptr size = 4096; |
1073 | // Allocate some. |
1074 | for (uptr i = 0; i < kNumAllocs; i++) { |
allocated[i] = (char *)a.Allocate(&stats, size, 1);
1076 | } |
1077 | |
1078 | a.ForceLock(); |
1079 | for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) { |
1080 | // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i); |
1081 | char *p1 = allocated[i % kNumAllocs]; |
EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
1086 | } |
1087 | |
1088 | for (uptr i = 0; i < kNumExpectedFalseLookups; i++) { |
1089 | void *p = reinterpret_cast<void *>(i % 1024); |
EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
p = reinterpret_cast<void *>(~0L - (i % 1024));
EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
1093 | } |
1094 | a.ForceUnlock(); |
1095 | |
1096 | for (uptr i = 0; i < kNumAllocs; i++) |
a.Deallocate(&stats, allocated[i]);
1098 | } |
1099 | |
1100 | |
1101 | // Don't test OOM conditions on Win64 because it causes other tests on the same |
1102 | // machine to OOM. |
1103 | #if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !ALLOCATOR64_SMALL_SIZE |
1104 | typedef __sanitizer::SizeClassMap<2, 22, 22, 34, 128, 16> SpecialSizeClassMap; |
1105 | template <typename AddressSpaceViewTy = LocalAddressSpaceView> |
1106 | struct AP64_SpecialSizeClassMap { |
1107 | static const uptr kSpaceBeg = kAllocatorSpace; |
1108 | static const uptr kSpaceSize = kAllocatorSize; |
1109 | static const uptr kMetadataSize = 0; |
1110 | typedef SpecialSizeClassMap SizeClassMap; |
1111 | typedef NoOpMapUnmapCallback MapUnmapCallback; |
1112 | static const uptr kFlags = 0; |
1113 | using AddressSpaceView = AddressSpaceViewTy; |
1114 | }; |
1115 | |
1116 | // Regression test for out-of-memory condition in PopulateFreeList(). |
1117 | TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) { |
1118 | // In a world where regions are small and chunks are huge... |
1119 | typedef SizeClassAllocator64<AP64_SpecialSizeClassMap<>> SpecialAllocator64; |
1120 | const uptr kRegionSize = |
1121 | kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded; |
1122 | SpecialAllocator64 *a = new SpecialAllocator64; |
a->Init(kReleaseToOSIntervalNever);
1124 | SpecialAllocator64::AllocatorCache cache; |
1125 | memset(&cache, 0, sizeof(cache)); |
cache.Init(0);
1127 | |
1128 | // ...one man is on a mission to overflow a region with a series of |
1129 | // successive allocations. |
1130 | |
1131 | const uptr kClassID = 24; |
const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
1133 | ASSERT_LT(2 * kAllocationSize, kRegionSize); |
1134 | ASSERT_GT(3 * kAllocationSize, kRegionSize); |
EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);
1138 | |
1139 | const uptr Class2 = 21; |
const uptr Size2 = SpecialSizeClassMap::Size(Class2);
1141 | ASSERT_EQ(Size2 * 8, kRegionSize); |
1142 | char *p[7]; |
1143 | for (int i = 0; i < 7; i++) { |
p[i] = (char*)cache.Allocate(a, Class2);
EXPECT_NE(p[i], nullptr);
fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
1147 | p[i][Size2 - 1] = 42; |
1148 | if (i) ASSERT_LT(p[i - 1], p[i]); |
1149 | } |
EXPECT_EQ(cache.Allocate(a, Class2), nullptr);
cache.Deallocate(a, Class2, p[0]);
cache.Drain(a);
1153 | ASSERT_EQ(p[6][Size2 - 1], 42); |
1154 | a->TestOnlyUnmap(); |
1155 | delete a; |
1156 | } |
1157 | |
1158 | #endif |
1159 | |
1160 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
1161 | |
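// Memory mapper stub that only records the size of the last requested counter
// buffer and never actually maps anything.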
1162 | class NoMemoryMapper { |
1163 | public: |
1164 | uptr last_request_buffer_size = 0; |
1165 | |
1166 | u64 *MapPackedCounterArrayBuffer(uptr buffer_size) { |
1167 | last_request_buffer_size = buffer_size * sizeof(u64); |
1168 | return nullptr; |
1169 | } |
1170 | }; |
1171 | |
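// Memory mapper that hands out a single writable page surrounded by
// inaccessible guard pages, so out-of-bounds accesses by the counter array
// fault immediately.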
1172 | class RedZoneMemoryMapper { |
1173 | public: |
1174 | RedZoneMemoryMapper() { |
1175 | const auto page_size = GetPageSize(); |
buffer = MmapOrDie(3ULL * page_size, "");
MprotectNoAccess(reinterpret_cast<uptr>(buffer), page_size);
MprotectNoAccess(reinterpret_cast<uptr>(buffer) + page_size * 2, page_size);
1179 | } |
~RedZoneMemoryMapper() { UnmapOrDie(buffer, 3 * GetPageSize()); }
1181 | |
1182 | u64 *MapPackedCounterArrayBuffer(uptr buffer_size) { |
1183 | buffer_size *= sizeof(u64); |
1184 | const auto page_size = GetPageSize(); |
1185 | CHECK_EQ(buffer_size, page_size); |
1186 | u64 *p = |
1187 | reinterpret_cast<u64 *>(reinterpret_cast<uptr>(buffer) + page_size); |
1188 | memset(p, 0, page_size); |
1189 | return p; |
1190 | } |
1191 | |
1192 | private: |
1193 | void *buffer; |
1194 | }; |
1195 | |
1196 | TEST(SanitizerCommon, SizeClassAllocator64PackedCounterArray) { |
1197 | NoMemoryMapper no_memory_mapper; |
1198 | for (int i = 0; i < 64; i++) { |
// Various valid counter max values packed into one word.
1200 | Allocator64::PackedCounterArray counters_2n(1, 1ULL << i, |
1201 | &no_memory_mapper); |
1202 | EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size); |
1203 | |
1204 | // Check the "all bit set" values too. |
1205 | Allocator64::PackedCounterArray counters_2n1_1(1, ~0ULL >> i, |
1206 | &no_memory_mapper); |
1207 | EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size); |
1208 | |
// Verify the packing ratio: each counter is expected to be packed into the
// closest power-of-2 number of bits.
1211 | Allocator64::PackedCounterArray counters(64, 1ULL << i, &no_memory_mapper); |
EXPECT_EQ(8ULL * RoundUpToPowerOfTwo(i + 1),
1213 | no_memory_mapper.last_request_buffer_size); |
1214 | } |
1215 | |
1216 | RedZoneMemoryMapper memory_mapper; |
1217 | // Go through 1, 2, 4, 8, .. 64 bits per counter. |
1218 | for (int i = 0; i < 7; i++) { |
1219 | // Make sure counters request one memory page for the buffer. |
1220 | const u64 kNumCounters = (GetPageSize() / 8) * (64 >> i); |
1221 | Allocator64::PackedCounterArray counters( |
1222 | kNumCounters, 1ULL << ((1 << i) - 1), &memory_mapper); |
counters.Inc(0);
for (u64 c = 1; c < kNumCounters - 1; c++) {
ASSERT_EQ(0ULL, counters.Get(c));
counters.Inc(c);
ASSERT_EQ(1ULL, counters.Get(c - 1));
}
ASSERT_EQ(0ULL, counters.Get(kNumCounters - 1));
counters.Inc(kNumCounters - 1);

if (i > 0) {
counters.IncRange(0, kNumCounters - 1);
for (u64 c = 0; c < kNumCounters; c++)
ASSERT_EQ(2ULL, counters.Get(c));
1236 | } |
1237 | } |
1238 | } |
1239 | |
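// Records released page ranges as a string of '.' (kept) and 'x' (released)
// characters, matching the format of the test cases below.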
1240 | class RangeRecorder { |
1241 | public: |
1242 | std::string reported_pages; |
1243 | |
1244 | RangeRecorder() |
1245 | : page_size_scaled_log( |
Log2(GetPageSizeCached() >> Allocator64::kCompactPtrScale)),
1247 | last_page_reported(0) {} |
1248 | |
void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {
1250 | from >>= page_size_scaled_log; |
1251 | to >>= page_size_scaled_log; |
1252 | ASSERT_LT(from, to); |
1253 | if (!reported_pages.empty()) |
1254 | ASSERT_LT(last_page_reported, from); |
1255 | reported_pages.append(from - last_page_reported, '.'); |
1256 | reported_pages.append(to - from, 'x'); |
1257 | last_page_reported = to; |
1258 | } |
1259 | |
1260 | private: |
1261 | const uptr page_size_scaled_log; |
1262 | u32 last_page_reported; |
1263 | }; |
1264 | |
1265 | TEST(SanitizerCommon, SizeClassAllocator64FreePagesRangeTracker) { |
1266 | typedef Allocator64::FreePagesRangeTracker<RangeRecorder> RangeTracker; |
1267 | |
1268 | // 'x' denotes a page to be released, '.' denotes a page to be kept around. |
1269 | const char* test_cases[] = { |
1270 | "" , |
1271 | "." , |
1272 | "x" , |
1273 | "........" , |
1274 | "xxxxxxxxxxx" , |
1275 | "..............xxxxx" , |
1276 | "xxxxxxxxxxxxxxxxxx....." , |
1277 | "......xxxxxxxx........" , |
1278 | "xxx..........xxxxxxxxxxxxxxx" , |
1279 | "......xxxx....xxxx........" , |
1280 | "xxx..........xxxxxxxx....xxxxxxx" , |
1281 | "x.x.x.x.x.x.x.x.x.x.x.x." , |
1282 | ".x.x.x.x.x.x.x.x.x.x.x.x" , |
1283 | ".x.x.x.x.x.x.x.x.x.x.x.x." , |
1284 | "x.x.x.x.x.x.x.x.x.x.x.x.x" , |
1285 | }; |
1286 | |
1287 | for (auto test_case : test_cases) { |
1288 | RangeRecorder range_recorder; |
1289 | RangeTracker tracker(&range_recorder, 1); |
1290 | for (int i = 0; test_case[i] != 0; i++) |
tracker.NextPage(test_case[i] == 'x');
1292 | tracker.Done(); |
1293 | // Strip trailing '.'-pages before comparing the results as they are not |
1294 | // going to be reported to range_recorder anyway. |
1295 | const char* last_x = strrchr(test_case, 'x'); |
1296 | std::string expected( |
1297 | test_case, |
1298 | last_x == nullptr ? 0 : (last_x - test_case + 1)); |
1299 | EXPECT_STREQ(expected.c_str(), range_recorder.reported_pages.c_str()); |
1300 | } |
1301 | } |
1302 | |
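// Memory mapper that records which (compact-pointer-scaled) pages were
// reported as released, for cross-checking in TestReleaseFreeMemoryToOS().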
1303 | class ReleasedPagesTrackingMemoryMapper { |
1304 | public: |
1305 | std::set<u32> reported_pages; |
1306 | std::vector<u64> buffer; |
1307 | |
1308 | u64 *MapPackedCounterArrayBuffer(uptr buffer_size) { |
1309 | reported_pages.clear(); |
1310 | buffer.assign(buffer_size, 0); |
1311 | return buffer.data(); |
1312 | } |
void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {
1314 | uptr page_size_scaled = |
1315 | GetPageSizeCached() >> Allocator64::kCompactPtrScale; |
1316 | for (u32 i = from; i < to; i += page_size_scaled) |
1317 | reported_pages.insert(i); |
1318 | } |
1319 | }; |
1320 | |
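// For every size class, builds a randomized free list, runs
// ReleaseFreeMemoryToOS() with a tracking memory mapper, and verifies that
// released pages are exactly those fully covered by ranges of free chunks.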
1321 | template <class Allocator> |
1322 | void TestReleaseFreeMemoryToOS() { |
1323 | ReleasedPagesTrackingMemoryMapper memory_mapper; |
1324 | const uptr kAllocatedPagesCount = 1024; |
1325 | const uptr page_size = GetPageSizeCached(); |
1326 | const uptr page_size_scaled = page_size >> Allocator::kCompactPtrScale; |
1327 | std::mt19937 r; |
1328 | uint32_t rnd_state = 42; |
1329 | |
1330 | for (uptr class_id = 1; class_id <= Allocator::SizeClassMapT::kLargestClassID; |
1331 | class_id++) { |
1332 | const uptr chunk_size = Allocator::SizeClassMapT::Size(class_id); |
1333 | const uptr chunk_size_scaled = chunk_size >> Allocator::kCompactPtrScale; |
1334 | const uptr max_chunks = |
1335 | kAllocatedPagesCount * GetPageSizeCached() / chunk_size; |
1336 | |
1337 | // Generate the random free list. |
1338 | std::vector<u32> free_array; |
1339 | bool in_free_range = false; |
1340 | uptr current_range_end = 0; |
1341 | for (uptr i = 0; i < max_chunks; i++) { |
1342 | if (i == current_range_end) { |
in_free_range = (my_rand_r(&rnd_state) & 1U) == 1;
current_range_end += my_rand_r(&rnd_state) % 100 + 1;
1345 | } |
1346 | if (in_free_range) |
1347 | free_array.push_back(i * chunk_size_scaled); |
1348 | } |
1349 | if (free_array.empty()) |
1350 | continue; |
// Shuffle free_array to verify that ReleaseFreeMemoryToOS does not depend on
// the list ordering.
1353 | std::shuffle(free_array.begin(), free_array.end(), r); |
1354 | |
1355 | Allocator::ReleaseFreeMemoryToOS(&free_array[0], free_array.size(), |
1356 | chunk_size, kAllocatedPagesCount, |
1357 | &memory_mapper, class_id); |
1358 | |
// Verify that no released page is touched by a used chunk, and that every
// range of free chunks large enough to cover entire memory pages had those
// pages released.
1362 | uptr verified_released_pages = 0; |
1363 | std::set<u32> free_chunks(free_array.begin(), free_array.end()); |
1364 | |
1365 | u32 current_chunk = 0; |
1366 | in_free_range = false; |
1367 | u32 current_free_range_start = 0; |
1368 | for (uptr i = 0; i <= max_chunks; i++) { |
1369 | bool is_free_chunk = free_chunks.find(current_chunk) != free_chunks.end(); |
1370 | |
1371 | if (is_free_chunk) { |
1372 | if (!in_free_range) { |
1373 | in_free_range = true; |
1374 | current_free_range_start = current_chunk; |
1375 | } |
1376 | } else { |
1377 | // Verify that this used chunk does not touch any released page. |
1378 | for (uptr i_page = current_chunk / page_size_scaled; |
1379 | i_page <= (current_chunk + chunk_size_scaled - 1) / |
1380 | page_size_scaled; |
1381 | i_page++) { |
1382 | bool page_released = |
1383 | memory_mapper.reported_pages.find(i_page * page_size_scaled) != |
1384 | memory_mapper.reported_pages.end(); |
1385 | ASSERT_EQ(false, page_released); |
1386 | } |
1387 | |
1388 | if (in_free_range) { |
1389 | in_free_range = false; |
1390 | // Verify that all entire memory pages covered by this range of free |
1391 | // chunks were released. |
u32 page = RoundUpTo(current_free_range_start, page_size_scaled);
1393 | while (page + page_size_scaled <= current_chunk) { |
1394 | bool page_released = |
1395 | memory_mapper.reported_pages.find(page) != |
1396 | memory_mapper.reported_pages.end(); |
1397 | ASSERT_EQ(true, page_released); |
1398 | verified_released_pages++; |
1399 | page += page_size_scaled; |
1400 | } |
1401 | } |
1402 | } |
1403 | |
1404 | current_chunk += chunk_size_scaled; |
1405 | } |
1406 | |
1407 | ASSERT_EQ(memory_mapper.reported_pages.size(), verified_released_pages); |
1408 | } |
1409 | } |
1410 | |
1411 | TEST(SanitizerCommon, SizeClassAllocator64ReleaseFreeMemoryToOS) { |
1412 | TestReleaseFreeMemoryToOS<Allocator64>(); |
1413 | } |
1414 | |
1415 | #if !ALLOCATOR64_SMALL_SIZE |
1416 | TEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) { |
1417 | TestReleaseFreeMemoryToOS<Allocator64Compact>(); |
1418 | } |
1419 | |
1420 | TEST(SanitizerCommon, SizeClassAllocator64VeryCompactReleaseFreeMemoryToOS) { |
1421 | TestReleaseFreeMemoryToOS<Allocator64VeryCompact>(); |
1422 | } |
1423 | #endif // !ALLOCATOR64_SMALL_SIZE |
1424 | |
1425 | #endif // SANITIZER_CAN_USE_ALLOCATOR64 |
1426 | |
1427 | TEST(SanitizerCommon, LowLevelAllocatorShouldRoundUpSizeOnAlloc) { |
1428 | // When allocating a memory block slightly bigger than a memory page and |
1429 | // LowLevelAllocator calls MmapOrDie for the internal buffer, it should round |
1430 | // the size up to the page size, so that subsequent calls to the allocator |
1431 | // can use the remaining space in the last allocated page. |
1432 | static LowLevelAllocator allocator; |
char *ptr1 = (char *)allocator.Allocate(GetPageSizeCached() + 16);
char *ptr2 = (char *)allocator.Allocate(16);
1435 | EXPECT_EQ(ptr2, ptr1 + GetPageSizeCached() + 16); |
1436 | } |
1437 | |
1438 | #endif // #if !SANITIZER_DEBUG |
1439 | |