1 | //===-- sanitizer_allocator_test.cpp --------------------------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file is a part of ThreadSanitizer/AddressSanitizer runtime. |
10 | // Tests for sanitizer_allocator.h. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | #include "sanitizer_common/sanitizer_allocator.h" |
14 | |
15 | #include <stdio.h> |
16 | #include <stdlib.h> |
17 | |
18 | #include <algorithm> |
19 | #include <random> |
20 | #include <set> |
21 | #include <vector> |
22 | |
23 | #include "gtest/gtest.h" |
24 | #include "sanitizer_common/sanitizer_allocator_internal.h" |
25 | #include "sanitizer_common/sanitizer_common.h" |
26 | #include "sanitizer_pthread_wrappers.h" |
27 | #include "sanitizer_test_utils.h" |
28 | |
29 | using namespace __sanitizer; |
30 | |
31 | #if SANITIZER_SOLARIS && defined(__sparcv9) |
32 | // FIXME: These tests probably fail because Solaris/sparcv9 uses the full |
// 64-bit address space. Needs more investigation.
34 | #define SKIP_ON_SOLARIS_SPARCV9(x) DISABLED_##x |
35 | #else |
36 | #define SKIP_ON_SOLARIS_SPARCV9(x) x |
37 | #endif |
38 | |
39 | // On 64-bit systems with small virtual address spaces (e.g. 39-bit) we can't |
40 | // use size class maps with a large number of classes, as that will make the |
41 | // SizeClassAllocator64 region size too small (< 2^32). |
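// (Roughly: SizeClassAllocator64 carves kSpaceSize into one region per size
// class, so e.g. a 2^37-byte space split across 64 rounded-up classes leaves
// only 2^31 bytes per region.)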
42 | #if SANITIZER_ANDROID && defined(__aarch64__) |
43 | #define ALLOCATOR64_SMALL_SIZE 1 |
44 | #elif SANITIZER_RISCV64 |
45 | #define ALLOCATOR64_SMALL_SIZE 1 |
46 | #else |
47 | #define ALLOCATOR64_SMALL_SIZE 0 |
48 | #endif |
49 | |
50 | // Too slow for debug build |
51 | #if !SANITIZER_DEBUG |
52 | |
53 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
54 | #if SANITIZER_WINDOWS |
55 | // On Windows 64-bit there is no easy way to find a large enough fixed address |
56 | // space that is always available. Thus, a dynamically allocated address space |
57 | // is used instead (i.e. ~(uptr)0). |
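// (SizeClassAllocator64 treats kSpaceBeg == ~(uptr)0 as a request to map the
// space dynamically at Init() rather than at a fixed address.)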
58 | static const uptr kAllocatorSpace = ~(uptr)0; |
static const uptr kAllocatorSize = 0x8000000000ULL; // 512G
60 | static const u64 kAddressSpaceSize = 1ULL << 47; |
61 | typedef DefaultSizeClassMap SizeClassMap; |
62 | #elif SANITIZER_ANDROID && defined(__aarch64__) |
63 | static const uptr kAllocatorSpace = 0x3000000000ULL; |
64 | static const uptr kAllocatorSize = 0x2000000000ULL; |
65 | static const u64 kAddressSpaceSize = 1ULL << 39; |
66 | typedef VeryCompactSizeClassMap SizeClassMap; |
67 | #elif SANITIZER_RISCV64 |
68 | const uptr kAllocatorSpace = ~(uptr)0; |
69 | const uptr kAllocatorSize = 0x2000000000ULL; // 128G. |
70 | static const u64 kAddressSpaceSize = 1ULL << 38; |
71 | typedef VeryDenseSizeClassMap SizeClassMap; |
72 | #else |
73 | static const uptr kAllocatorSpace = 0x700000000000ULL; |
74 | static const uptr kAllocatorSize = 0x010000000000ULL; // 1T. |
75 | static const u64 kAddressSpaceSize = 1ULL << 47; |
76 | typedef DefaultSizeClassMap SizeClassMap; |
77 | #endif |
78 | |
79 | template <typename AddressSpaceViewTy> |
struct AP64 { // Allocator Params. Short name for shorter demangled names.
81 | static const uptr kSpaceBeg = kAllocatorSpace; |
82 | static const uptr kSpaceSize = kAllocatorSize; |
83 | static const uptr kMetadataSize = 16; |
84 | typedef ::SizeClassMap SizeClassMap; |
85 | typedef NoOpMapUnmapCallback MapUnmapCallback; |
86 | static const uptr kFlags = 0; |
87 | using AddressSpaceView = AddressSpaceViewTy; |
88 | }; |
89 | |
90 | template <typename AddressSpaceViewTy> |
91 | struct AP64Dyn { |
92 | static const uptr kSpaceBeg = ~(uptr)0; |
93 | static const uptr kSpaceSize = kAllocatorSize; |
94 | static const uptr kMetadataSize = 16; |
95 | typedef ::SizeClassMap SizeClassMap; |
96 | typedef NoOpMapUnmapCallback MapUnmapCallback; |
97 | static const uptr kFlags = 0; |
98 | using AddressSpaceView = AddressSpaceViewTy; |
99 | }; |
100 | |
101 | template <typename AddressSpaceViewTy> |
102 | struct AP64Compact { |
103 | static const uptr kSpaceBeg = ~(uptr)0; |
104 | static const uptr kSpaceSize = kAllocatorSize; |
105 | static const uptr kMetadataSize = 16; |
106 | typedef CompactSizeClassMap SizeClassMap; |
107 | typedef NoOpMapUnmapCallback MapUnmapCallback; |
108 | static const uptr kFlags = 0; |
109 | using AddressSpaceView = AddressSpaceViewTy; |
110 | }; |
111 | |
112 | template <typename AddressSpaceViewTy> |
113 | struct AP64VeryCompact { |
114 | static const uptr kSpaceBeg = ~(uptr)0; |
115 | static const uptr kSpaceSize = 1ULL << 37; |
116 | static const uptr kMetadataSize = 16; |
117 | typedef VeryCompactSizeClassMap SizeClassMap; |
118 | typedef NoOpMapUnmapCallback MapUnmapCallback; |
119 | static const uptr kFlags = 0; |
120 | using AddressSpaceView = AddressSpaceViewTy; |
121 | }; |
122 | |
123 | template <typename AddressSpaceViewTy> |
124 | struct AP64Dense { |
125 | static const uptr kSpaceBeg = kAllocatorSpace; |
126 | static const uptr kSpaceSize = kAllocatorSize; |
127 | static const uptr kMetadataSize = 16; |
128 | typedef DenseSizeClassMap SizeClassMap; |
129 | typedef NoOpMapUnmapCallback MapUnmapCallback; |
130 | static const uptr kFlags = 0; |
131 | using AddressSpaceView = AddressSpaceViewTy; |
132 | }; |
133 | |
134 | template <typename AddressSpaceView> |
135 | using Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>; |
136 | using Allocator64 = Allocator64ASVT<LocalAddressSpaceView>; |
137 | |
138 | template <typename AddressSpaceView> |
139 | using Allocator64DynamicASVT = SizeClassAllocator64<AP64Dyn<AddressSpaceView>>; |
140 | using Allocator64Dynamic = Allocator64DynamicASVT<LocalAddressSpaceView>; |
141 | |
142 | template <typename AddressSpaceView> |
143 | using Allocator64CompactASVT = |
144 | SizeClassAllocator64<AP64Compact<AddressSpaceView>>; |
145 | using Allocator64Compact = Allocator64CompactASVT<LocalAddressSpaceView>; |
146 | |
147 | template <typename AddressSpaceView> |
148 | using Allocator64VeryCompactASVT = |
149 | SizeClassAllocator64<AP64VeryCompact<AddressSpaceView>>; |
150 | using Allocator64VeryCompact = |
151 | Allocator64VeryCompactASVT<LocalAddressSpaceView>; |
152 | |
153 | template <typename AddressSpaceView> |
154 | using Allocator64DenseASVT = SizeClassAllocator64<AP64Dense<AddressSpaceView>>; |
155 | using Allocator64Dense = Allocator64DenseASVT<LocalAddressSpaceView>; |
156 | |
157 | #elif defined(__mips64) |
158 | static const u64 kAddressSpaceSize = 1ULL << 40; |
159 | #elif defined(__aarch64__) |
160 | static const u64 kAddressSpaceSize = 1ULL << 39; |
161 | #elif defined(__s390x__) |
162 | static const u64 kAddressSpaceSize = 1ULL << 53; |
163 | #elif defined(__s390__) |
164 | static const u64 kAddressSpaceSize = 1ULL << 31; |
165 | #else |
166 | static const u64 kAddressSpaceSize = 1ULL << 32; |
167 | #endif |
168 | |
169 | static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24); |
170 | |
171 | template <typename AddressSpaceViewTy> |
172 | struct AP32Compact { |
173 | static const uptr kSpaceBeg = 0; |
174 | static const u64 kSpaceSize = kAddressSpaceSize; |
175 | static const uptr kMetadataSize = 16; |
176 | typedef CompactSizeClassMap SizeClassMap; |
177 | static const uptr kRegionSizeLog = ::kRegionSizeLog; |
178 | using AddressSpaceView = AddressSpaceViewTy; |
179 | typedef NoOpMapUnmapCallback MapUnmapCallback; |
180 | static const uptr kFlags = 0; |
181 | }; |
182 | template <typename AddressSpaceView> |
183 | using Allocator32CompactASVT = |
184 | SizeClassAllocator32<AP32Compact<AddressSpaceView>>; |
185 | using Allocator32Compact = Allocator32CompactASVT<LocalAddressSpaceView>; |
186 | |
187 | template <class SizeClassMap> |
188 | void TestSizeClassMap() { |
189 | typedef SizeClassMap SCMap; |
190 | SCMap::Print(); |
191 | SCMap::Validate(); |
192 | } |
193 | |
194 | TEST(SanitizerCommon, DefaultSizeClassMap) { |
195 | TestSizeClassMap<DefaultSizeClassMap>(); |
196 | } |
197 | |
198 | TEST(SanitizerCommon, CompactSizeClassMap) { |
199 | TestSizeClassMap<CompactSizeClassMap>(); |
200 | } |
201 | |
202 | TEST(SanitizerCommon, VeryCompactSizeClassMap) { |
203 | TestSizeClassMap<VeryCompactSizeClassMap>(); |
204 | } |
205 | |
206 | TEST(SanitizerCommon, InternalSizeClassMap) { |
207 | TestSizeClassMap<InternalSizeClassMap>(); |
208 | } |
209 | |
210 | TEST(SanitizerCommon, DenseSizeClassMap) { |
TestSizeClassMap<DenseSizeClassMap>();
212 | } |
213 | |
214 | template <class Allocator> |
215 | void TestSizeClassAllocator(uptr premapped_heap = 0) { |
216 | Allocator *a = new Allocator; |
217 | a->Init(kReleaseToOSIntervalNever, premapped_heap); |
218 | typename Allocator::AllocatorCache cache; |
219 | memset(&cache, 0, sizeof(cache)); |
220 | cache.Init(0); |
221 | |
222 | static const uptr sizes[] = { |
223 | 1, 16, 30, 40, 100, 1000, 10000, |
224 | 50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000 |
225 | }; |
226 | |
227 | std::vector<void *> allocated; |
228 | |
229 | uptr last_total_allocated = 0; |
230 | for (int i = 0; i < 3; i++) { |
231 | // Allocate a bunch of chunks. |
232 | for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) { |
233 | uptr size = sizes[s]; |
234 | if (!a->CanAllocate(size, 1)) continue; |
235 | // printf("s = %ld\n", size); |
236 | uptr n_iter = std::max((uptr)6, 4000000 / size); |
237 | // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter); |
238 | for (uptr i = 0; i < n_iter; i++) { |
239 | uptr class_id0 = Allocator::SizeClassMapT::ClassID(size); |
240 | char *x = (char*)cache.Allocate(a, class_id0); |
241 | x[0] = 0; |
242 | x[size - 1] = 0; |
243 | x[size / 2] = 0; |
244 | allocated.push_back(x); |
245 | CHECK_EQ(x, a->GetBlockBegin(x)); |
246 | CHECK_EQ(x, a->GetBlockBegin(x + size - 1)); |
247 | CHECK(a->PointerIsMine(x)); |
248 | CHECK(a->PointerIsMine(x + size - 1)); |
249 | CHECK(a->PointerIsMine(x + size / 2)); |
250 | CHECK_GE(a->GetActuallyAllocatedSize(x), size); |
251 | uptr class_id = a->GetSizeClass(x); |
252 | CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size)); |
253 | uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x)); |
254 | metadata[0] = reinterpret_cast<uptr>(x) + 1; |
255 | metadata[1] = 0xABCD; |
256 | } |
257 | } |
258 | // Deallocate all. |
259 | for (uptr i = 0; i < allocated.size(); i++) { |
260 | void *x = allocated[i]; |
261 | uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x)); |
262 | CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1); |
263 | CHECK_EQ(metadata[1], 0xABCD); |
264 | cache.Deallocate(a, a->GetSizeClass(x), x); |
265 | } |
266 | allocated.clear(); |
267 | uptr total_allocated = a->TotalMemoryUsed(); |
268 | if (last_total_allocated == 0) |
269 | last_total_allocated = total_allocated; |
270 | CHECK_EQ(last_total_allocated, total_allocated); |
271 | } |
272 | |
273 | // Check that GetBlockBegin never crashes. |
274 | for (uptr x = 0, step = kAddressSpaceSize / 100000; |
275 | x < kAddressSpaceSize - step; x += step) |
276 | if (a->PointerIsMine(reinterpret_cast<void *>(x))) |
277 | Ident(a->GetBlockBegin(reinterpret_cast<void *>(x))); |
278 | |
279 | a->TestOnlyUnmap(); |
280 | delete a; |
281 | } |
282 | |
283 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
284 | |
// Maps 2 * kAllocatorSize bytes on construction so that a kAllocatorSize-
// aligned address is available inside the mapping; unmaps on destruction.
287 | class ScopedPremappedHeap { |
288 | public: |
289 | ScopedPremappedHeap() { |
BasePtr = MmapNoReserveOrDie(2 * kAllocatorSize, "preallocated heap");
AlignedAddr = RoundUpTo(reinterpret_cast<uptr>(BasePtr), kAllocatorSize);
}

~ScopedPremappedHeap() { UnmapOrDie(BasePtr, kAllocatorSize); }
295 | |
296 | uptr Addr() { return AlignedAddr; } |
297 | |
298 | private: |
299 | void *BasePtr; |
300 | uptr AlignedAddr; |
301 | }; |
302 | |
303 | // These tests can fail on Windows if memory is somewhat full and lit happens |
304 | // to run them all at the same time. FIXME: Make them not flaky and reenable. |
305 | #if !SANITIZER_WINDOWS |
306 | TEST(SanitizerCommon, SizeClassAllocator64) { |
307 | TestSizeClassAllocator<Allocator64>(); |
308 | } |
309 | |
310 | TEST(SanitizerCommon, SizeClassAllocator64Dynamic) { |
311 | TestSizeClassAllocator<Allocator64Dynamic>(); |
312 | } |
313 | |
314 | #if !ALLOCATOR64_SMALL_SIZE |
// Platforms with small address spaces (e.g. Android's 39-bit one) cannot
// reliably map 2 * kAllocatorSize, so skip the premapped-heap tests there.
317 | TEST(SanitizerCommon, SizeClassAllocator64DynamicPremapped) { |
318 | ScopedPremappedHeap h; |
TestSizeClassAllocator<Allocator64Dynamic>(h.Addr());
320 | } |
321 | |
322 | TEST(SanitizerCommon, SizeClassAllocator64Compact) { |
323 | TestSizeClassAllocator<Allocator64Compact>(); |
324 | } |
325 | |
326 | TEST(SanitizerCommon, SizeClassAllocator64Dense) { |
327 | TestSizeClassAllocator<Allocator64Dense>(); |
328 | } |
329 | #endif |
330 | |
331 | TEST(SanitizerCommon, SizeClassAllocator64VeryCompact) { |
332 | TestSizeClassAllocator<Allocator64VeryCompact>(); |
333 | } |
334 | #endif |
335 | #endif |
336 | |
337 | TEST(SanitizerCommon, SizeClassAllocator32Compact) { |
338 | TestSizeClassAllocator<Allocator32Compact>(); |
339 | } |
340 | |
341 | template <typename AddressSpaceViewTy> |
342 | struct AP32SeparateBatches { |
343 | static const uptr kSpaceBeg = 0; |
344 | static const u64 kSpaceSize = kAddressSpaceSize; |
345 | static const uptr kMetadataSize = 16; |
346 | typedef DefaultSizeClassMap SizeClassMap; |
347 | static const uptr kRegionSizeLog = ::kRegionSizeLog; |
348 | using AddressSpaceView = AddressSpaceViewTy; |
349 | typedef NoOpMapUnmapCallback MapUnmapCallback; |
350 | static const uptr kFlags = |
351 | SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch; |
352 | }; |
353 | template <typename AddressSpaceView> |
354 | using Allocator32SeparateBatchesASVT = |
355 | SizeClassAllocator32<AP32SeparateBatches<AddressSpaceView>>; |
356 | using Allocator32SeparateBatches = |
357 | Allocator32SeparateBatchesASVT<LocalAddressSpaceView>; |
358 | |
359 | TEST(SanitizerCommon, SizeClassAllocator32SeparateBatches) { |
360 | TestSizeClassAllocator<Allocator32SeparateBatches>(); |
361 | } |
362 | |
363 | template <class Allocator> |
364 | void SizeClassAllocatorMetadataStress(uptr premapped_heap = 0) { |
365 | Allocator *a = new Allocator; |
366 | a->Init(kReleaseToOSIntervalNever, premapped_heap); |
367 | typename Allocator::AllocatorCache cache; |
368 | memset(&cache, 0, sizeof(cache)); |
369 | cache.Init(0); |
370 | |
371 | const uptr kNumAllocs = 1 << 13; |
372 | void *allocated[kNumAllocs]; |
373 | void *meta[kNumAllocs]; |
374 | for (uptr i = 0; i < kNumAllocs; i++) { |
375 | void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1)); |
376 | allocated[i] = x; |
377 | meta[i] = a->GetMetaData(x); |
378 | } |
379 | // Get Metadata kNumAllocs^2 times. |
380 | for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) { |
381 | uptr idx = i % kNumAllocs; |
382 | void *m = a->GetMetaData(allocated[idx]); |
383 | EXPECT_EQ(m, meta[idx]); |
384 | } |
385 | for (uptr i = 0; i < kNumAllocs; i++) { |
386 | cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]); |
387 | } |
388 | |
389 | a->TestOnlyUnmap(); |
390 | delete a; |
391 | } |
392 | |
393 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
394 | // These tests can fail on Windows if memory is somewhat full and lit happens |
395 | // to run them all at the same time. FIXME: Make them not flaky and reenable. |
396 | #if !SANITIZER_WINDOWS |
397 | TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) { |
398 | SizeClassAllocatorMetadataStress<Allocator64>(); |
399 | } |
400 | |
401 | TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) { |
402 | SizeClassAllocatorMetadataStress<Allocator64Dynamic>(); |
403 | } |
404 | |
405 | #if !ALLOCATOR64_SMALL_SIZE |
406 | TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedMetadataStress) { |
407 | ScopedPremappedHeap h; |
SizeClassAllocatorMetadataStress<Allocator64Dynamic>(h.Addr());
409 | } |
410 | |
411 | TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) { |
412 | SizeClassAllocatorMetadataStress<Allocator64Compact>(); |
413 | } |
414 | #endif |
415 | |
416 | #endif |
417 | #endif // SANITIZER_CAN_USE_ALLOCATOR64 |
418 | TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) { |
419 | SizeClassAllocatorMetadataStress<Allocator32Compact>(); |
420 | } |
421 | |
422 | template <class Allocator> |
423 | void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize, |
424 | uptr premapped_heap = 0) { |
425 | Allocator *a = new Allocator; |
426 | a->Init(kReleaseToOSIntervalNever, premapped_heap); |
427 | typename Allocator::AllocatorCache cache; |
428 | memset(&cache, 0, sizeof(cache)); |
429 | cache.Init(0); |
430 | |
431 | uptr max_size_class = Allocator::SizeClassMapT::kLargestClassID; |
432 | uptr size = Allocator::SizeClassMapT::Size(max_size_class); |
433 | // Make sure we correctly compute GetBlockBegin() w/o overflow. |
434 | for (size_t i = 0; i <= TotalSize / size; i++) { |
435 | void *x = cache.Allocate(a, max_size_class); |
436 | void *beg = a->GetBlockBegin(x); |
437 | // if ((i & (i - 1)) == 0) |
438 | // fprintf(stderr, "[%zd] %p %p\n", i, x, beg); |
439 | EXPECT_EQ(x, beg); |
440 | } |
441 | |
442 | a->TestOnlyUnmap(); |
443 | delete a; |
444 | } |
445 | |
446 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
447 | // These tests can fail on Windows if memory is somewhat full and lit happens |
448 | // to run them all at the same time. FIXME: Make them not flaky and reenable. |
449 | #if !SANITIZER_WINDOWS |
450 | TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) { |
451 | SizeClassAllocatorGetBlockBeginStress<Allocator64>( |
452 | 1ULL << (SANITIZER_ANDROID ? 31 : 33)); |
453 | } |
454 | TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) { |
455 | SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>( |
1ULL << (SANITIZER_ANDROID ? 31 : 33));
457 | } |
458 | #if !ALLOCATOR64_SMALL_SIZE |
459 | TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedGetBlockBegin) { |
460 | ScopedPremappedHeap h; |
461 | SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>( |
1ULL << (SANITIZER_ANDROID ? 31 : 33), h.Addr());
463 | } |
464 | TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) { |
SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
466 | } |
467 | #endif |
468 | TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) { |
// Does not have > 4GB for each class.
SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);
471 | } |
472 | TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) { |
SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>(1ULL << 33);
474 | } |
475 | #endif |
476 | #endif // SANITIZER_CAN_USE_ALLOCATOR64 |
477 | |
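// Counts the map/unmap callbacks issued by the allocators so the tests below
// can assert on the exact number of memory mappings performed.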
478 | struct TestMapUnmapCallback { |
479 | static int map_count, map_secondary_count, unmap_count; |
480 | void OnMap(uptr p, uptr size) const { map_count++; } |
481 | void OnMapSecondary(uptr p, uptr size, uptr user_begin, |
482 | uptr user_size) const { |
483 | map_secondary_count++; |
484 | } |
485 | void OnUnmap(uptr p, uptr size) const { unmap_count++; } |
486 | |
487 | static void Reset() { map_count = map_secondary_count = unmap_count = 0; } |
488 | }; |
489 | int TestMapUnmapCallback::map_count; |
490 | int TestMapUnmapCallback::map_secondary_count; |
491 | int TestMapUnmapCallback::unmap_count; |
492 | |
493 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
494 | // These tests can fail on Windows if memory is somewhat full and lit happens |
495 | // to run them all at the same time. FIXME: Make them not flaky and reenable. |
496 | #if !SANITIZER_WINDOWS |
497 | |
498 | template <typename AddressSpaceViewTy = LocalAddressSpaceView> |
499 | struct AP64WithCallback { |
500 | static const uptr kSpaceBeg = kAllocatorSpace; |
501 | static const uptr kSpaceSize = kAllocatorSize; |
502 | static const uptr kMetadataSize = 16; |
503 | typedef ::SizeClassMap SizeClassMap; |
504 | typedef TestMapUnmapCallback MapUnmapCallback; |
505 | static const uptr kFlags = 0; |
506 | using AddressSpaceView = AddressSpaceViewTy; |
507 | }; |
508 | |
509 | TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) { |
510 | TestMapUnmapCallback::Reset(); |
511 | typedef SizeClassAllocator64<AP64WithCallback<>> Allocator64WithCallBack; |
512 | Allocator64WithCallBack *a = new Allocator64WithCallBack; |
a->Init(kReleaseToOSIntervalNever);
514 | EXPECT_EQ(TestMapUnmapCallback::map_count, 1); // Allocator state. |
515 | EXPECT_EQ(TestMapUnmapCallback::map_secondary_count, 0); |
516 | typename Allocator64WithCallBack::AllocatorCache cache; |
517 | memset(&cache, 0, sizeof(cache)); |
cache.Init(0);
519 | AllocatorStats stats; |
520 | stats.Init(); |
521 | const size_t kNumChunks = 128; |
522 | uint32_t chunks[kNumChunks]; |
a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
524 | // State + alloc + metadata + freearray. |
525 | EXPECT_EQ(TestMapUnmapCallback::map_count, 4); |
526 | EXPECT_EQ(TestMapUnmapCallback::map_secondary_count, 0); |
527 | a->TestOnlyUnmap(); |
528 | EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1); // The whole thing. |
529 | delete a; |
530 | } |
531 | #endif |
532 | #endif |
533 | |
534 | template <typename AddressSpaceViewTy = LocalAddressSpaceView> |
535 | struct AP32WithCallback { |
536 | static const uptr kSpaceBeg = 0; |
537 | static const u64 kSpaceSize = kAddressSpaceSize; |
538 | static const uptr kMetadataSize = 16; |
539 | typedef CompactSizeClassMap SizeClassMap; |
540 | static const uptr kRegionSizeLog = ::kRegionSizeLog; |
541 | using AddressSpaceView = AddressSpaceViewTy; |
542 | typedef TestMapUnmapCallback MapUnmapCallback; |
543 | static const uptr kFlags = 0; |
544 | }; |
545 | |
546 | TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) { |
547 | TestMapUnmapCallback::Reset(); |
548 | typedef SizeClassAllocator32<AP32WithCallback<>> Allocator32WithCallBack; |
549 | Allocator32WithCallBack *a = new Allocator32WithCallBack; |
a->Init(kReleaseToOSIntervalNever);
551 | EXPECT_EQ(TestMapUnmapCallback::map_count, 0); |
552 | EXPECT_EQ(TestMapUnmapCallback::map_secondary_count, 0); |
553 | Allocator32WithCallBack::AllocatorCache cache; |
554 | memset(&cache, 0, sizeof(cache)); |
cache.Init(0);
556 | AllocatorStats stats; |
557 | stats.Init(); |
a->AllocateBatch(&stats, &cache, 32);
559 | EXPECT_EQ(TestMapUnmapCallback::map_count, 1); |
560 | EXPECT_EQ(TestMapUnmapCallback::map_secondary_count, 0); |
561 | a->TestOnlyUnmap(); |
562 | EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1); |
563 | delete a; |
564 | } |
565 | |
566 | TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) { |
567 | TestMapUnmapCallback::Reset(); |
568 | LargeMmapAllocator<TestMapUnmapCallback> a; |
569 | a.Init(); |
570 | AllocatorStats stats; |
571 | stats.Init(); |
void *x = a.Allocate(&stats, 1 << 20, 1);
573 | EXPECT_EQ(TestMapUnmapCallback::map_count, 0); |
574 | EXPECT_EQ(TestMapUnmapCallback::map_secondary_count, 1); |
a.Deallocate(&stats, x);
576 | EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1); |
577 | } |
578 | |
579 | // Don't test OOM conditions on Win64 because it causes other tests on the same |
580 | // machine to OOM. |
581 | #if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 |
582 | TEST(SanitizerCommon, SizeClassAllocator64Overflow) { |
583 | Allocator64 a; |
a.Init(kReleaseToOSIntervalNever);
585 | Allocator64::AllocatorCache cache; |
586 | memset(&cache, 0, sizeof(cache)); |
cache.Init(0);
588 | AllocatorStats stats; |
589 | stats.Init(); |
590 | |
591 | const size_t kNumChunks = 128; |
592 | uint32_t chunks[kNumChunks]; |
593 | bool allocation_failed = false; |
594 | for (int i = 0; i < 1000000; i++) { |
595 | uptr class_id = a.kNumClasses - 1; |
if (!a.GetFromAllocator(&stats, class_id, chunks, kNumChunks)) {
597 | allocation_failed = true; |
598 | break; |
599 | } |
600 | } |
601 | EXPECT_EQ(allocation_failed, true); |
602 | |
603 | a.TestOnlyUnmap(); |
604 | } |
605 | #endif |
606 | |
607 | TEST(SanitizerCommon, LargeMmapAllocator) { |
608 | LargeMmapAllocator<NoOpMapUnmapCallback> a; |
609 | a.Init(); |
610 | AllocatorStats stats; |
611 | stats.Init(); |
612 | |
613 | static const int kNumAllocs = 1000; |
614 | char *allocated[kNumAllocs]; |
615 | static const uptr size = 4000; |
616 | // Allocate some. |
617 | for (int i = 0; i < kNumAllocs; i++) { |
allocated[i] = (char *)a.Allocate(&stats, size, 1);
619 | CHECK(a.PointerIsMine(allocated[i])); |
620 | } |
621 | // Deallocate all. |
622 | CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs); |
623 | for (int i = 0; i < kNumAllocs; i++) { |
624 | char *p = allocated[i]; |
625 | CHECK(a.PointerIsMine(p)); |
a.Deallocate(&stats, p);
}
// Check that none are left.
629 | CHECK_EQ(a.TotalMemoryUsed(), 0); |
630 | |
631 | // Allocate some more, also add metadata. |
632 | for (int i = 0; i < kNumAllocs; i++) { |
char *x = (char *)a.Allocate(&stats, size, 1);
CHECK_GE(a.GetActuallyAllocatedSize(x), size);
uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
636 | *meta = i; |
637 | allocated[i] = x; |
638 | } |
639 | for (int i = 0; i < kNumAllocs * kNumAllocs; i++) { |
640 | char *p = allocated[i % kNumAllocs]; |
641 | CHECK(a.PointerIsMine(p)); |
642 | CHECK(a.PointerIsMine(p + 2000)); |
643 | } |
644 | CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs); |
645 | // Deallocate all in reverse order. |
646 | for (int i = 0; i < kNumAllocs; i++) { |
647 | int idx = kNumAllocs - i - 1; |
648 | char *p = allocated[idx]; |
649 | uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p)); |
650 | CHECK_EQ(*meta, idx); |
651 | CHECK(a.PointerIsMine(p)); |
a.Deallocate(&stats, p);
653 | } |
654 | CHECK_EQ(a.TotalMemoryUsed(), 0); |
655 | |
// Test alignments. Test with 256MB alignment on 64-bit non-Windows machines.
// Windows doesn't overcommit, and many machines do not have ~25GB of swap.
658 | uptr max_alignment = |
659 | (SANITIZER_WORDSIZE == 64 && !SANITIZER_WINDOWS) ? (1 << 28) : (1 << 24); |
660 | for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) { |
661 | const uptr kNumAlignedAllocs = 100; |
662 | for (uptr i = 0; i < kNumAlignedAllocs; i++) { |
663 | uptr size = ((i % 10) + 1) * 4096; |
char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
665 | CHECK_EQ(p, a.GetBlockBegin(p)); |
666 | CHECK_EQ(p, a.GetBlockBegin(p + size - 1)); |
667 | CHECK_EQ(p, a.GetBlockBegin(p + size / 2)); |
668 | CHECK_EQ(0, (uptr)allocated[i] % alignment); |
669 | p[0] = p[size - 1] = 0; |
670 | } |
671 | for (uptr i = 0; i < kNumAlignedAllocs; i++) { |
a.Deallocate(&stats, allocated[i]);
673 | } |
674 | } |
675 | |
676 | // Regression test for boundary condition in GetBlockBegin(). |
677 | uptr page_size = GetPageSizeCached(); |
char *p = (char *)a.Allocate(&stats, page_size, 1);
679 | CHECK_EQ(p, a.GetBlockBegin(p)); |
680 | CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1)); |
681 | CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size)); |
a.Deallocate(&stats, p);
683 | } |
684 | |
685 | template <class PrimaryAllocator> |
686 | void TestCombinedAllocator(uptr premapped_heap = 0) { |
687 | typedef CombinedAllocator<PrimaryAllocator> Allocator; |
688 | Allocator *a = new Allocator; |
689 | a->Init(kReleaseToOSIntervalNever, premapped_heap); |
690 | std::mt19937 r; |
691 | |
692 | typename Allocator::AllocatorCache cache; |
693 | memset(&cache, 0, sizeof(cache)); |
694 | a->InitCache(&cache); |
695 | |
696 | EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0); |
697 | EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0); |
698 | EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0); |
699 | EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0); |
700 | EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0); |
701 | EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0); |
702 | |
703 | const uptr kNumAllocs = 100000; |
704 | const uptr kNumIter = 10; |
705 | for (uptr iter = 0; iter < kNumIter; iter++) { |
706 | std::vector<void*> allocated; |
707 | for (uptr i = 0; i < kNumAllocs; i++) { |
708 | uptr size = (i % (1 << 14)) + 1; |
709 | if ((i % 1024) == 0) |
710 | size = 1 << (10 + (i % 14)); |
711 | void *x = a->Allocate(&cache, size, 1); |
712 | uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x)); |
713 | CHECK_EQ(*meta, 0); |
714 | *meta = size; |
715 | allocated.push_back(x); |
716 | } |
717 | |
718 | std::shuffle(allocated.begin(), allocated.end(), r); |
719 | |
720 | // Test ForEachChunk(...) |
721 | { |
722 | std::set<void *> reported_chunks; |
723 | auto cb = [](uptr chunk, void *arg) { |
724 | auto reported_chunks_ptr = reinterpret_cast<std::set<void *> *>(arg); |
725 | auto pair = |
726 | reported_chunks_ptr->insert(reinterpret_cast<void *>(chunk)); |
727 | // Check chunk is never reported more than once. |
728 | ASSERT_TRUE(pair.second); |
729 | }; |
730 | a->ForEachChunk(cb, reinterpret_cast<void *>(&reported_chunks)); |
731 | for (const auto &allocated_ptr : allocated) { |
732 | ASSERT_NE(reported_chunks.find(allocated_ptr), reported_chunks.end()); |
733 | } |
734 | } |
735 | |
736 | for (uptr i = 0; i < kNumAllocs; i++) { |
737 | void *x = allocated[i]; |
738 | uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x)); |
739 | CHECK_NE(*meta, 0); |
740 | CHECK(a->PointerIsMine(x)); |
741 | *meta = 0; |
742 | a->Deallocate(&cache, x); |
743 | } |
744 | allocated.clear(); |
745 | a->SwallowCache(&cache); |
746 | } |
747 | a->DestroyCache(&cache); |
748 | a->TestOnlyUnmap(); |
749 | } |
750 | |
751 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
752 | TEST(SanitizerCommon, CombinedAllocator64) { |
753 | TestCombinedAllocator<Allocator64>(); |
754 | } |
755 | |
756 | TEST(SanitizerCommon, CombinedAllocator64Dynamic) { |
757 | TestCombinedAllocator<Allocator64Dynamic>(); |
758 | } |
759 | |
760 | #if !ALLOCATOR64_SMALL_SIZE |
761 | #if !SANITIZER_WINDOWS |
762 | // Windows fails to map 1TB, so disable this test. |
763 | TEST(SanitizerCommon, CombinedAllocator64DynamicPremapped) { |
764 | ScopedPremappedHeap h; |
TestCombinedAllocator<Allocator64Dynamic>(h.Addr());
766 | } |
767 | #endif |
768 | |
769 | TEST(SanitizerCommon, CombinedAllocator64Compact) { |
770 | TestCombinedAllocator<Allocator64Compact>(); |
771 | } |
772 | #endif |
773 | |
774 | TEST(SanitizerCommon, CombinedAllocator64VeryCompact) { |
775 | TestCombinedAllocator<Allocator64VeryCompact>(); |
776 | } |
777 | #endif |
778 | |
779 | TEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(CombinedAllocator32Compact)) { |
780 | TestCombinedAllocator<Allocator32Compact>(); |
781 | } |
782 | |
783 | template <class Allocator> |
784 | void TestSizeClassAllocatorLocalCache(uptr premapped_heap = 0) { |
785 | using AllocatorCache = typename Allocator::AllocatorCache; |
786 | AllocatorCache cache; |
787 | Allocator *a = new Allocator(); |
788 | |
789 | a->Init(kReleaseToOSIntervalNever, premapped_heap); |
790 | memset(&cache, 0, sizeof(cache)); |
791 | cache.Init(0); |
792 | |
793 | const uptr kNumAllocs = 10000; |
794 | const int kNumIter = 100; |
795 | uptr saved_total = 0; |
796 | for (int class_id = 1; class_id <= 5; class_id++) { |
797 | for (int it = 0; it < kNumIter; it++) { |
798 | void *allocated[kNumAllocs]; |
799 | for (uptr i = 0; i < kNumAllocs; i++) { |
800 | allocated[i] = cache.Allocate(a, class_id); |
801 | } |
802 | for (uptr i = 0; i < kNumAllocs; i++) { |
803 | cache.Deallocate(a, class_id, allocated[i]); |
804 | } |
805 | cache.Drain(a); |
806 | uptr total_allocated = a->TotalMemoryUsed(); |
807 | if (it) |
808 | CHECK_EQ(saved_total, total_allocated); |
809 | saved_total = total_allocated; |
810 | } |
811 | } |
812 | |
813 | a->TestOnlyUnmap(); |
814 | delete a; |
815 | } |
816 | |
817 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
818 | // These tests can fail on Windows if memory is somewhat full and lit happens |
819 | // to run them all at the same time. FIXME: Make them not flaky and reenable. |
820 | #if !SANITIZER_WINDOWS |
821 | TEST(SanitizerCommon, SizeClassAllocator64LocalCache) { |
822 | TestSizeClassAllocatorLocalCache<Allocator64>(); |
823 | } |
824 | |
825 | TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) { |
826 | TestSizeClassAllocatorLocalCache<Allocator64Dynamic>(); |
827 | } |
828 | |
829 | #if !ALLOCATOR64_SMALL_SIZE |
830 | TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedLocalCache) { |
831 | ScopedPremappedHeap h; |
TestSizeClassAllocatorLocalCache<Allocator64Dynamic>(h.Addr());
833 | } |
834 | |
835 | TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) { |
836 | TestSizeClassAllocatorLocalCache<Allocator64Compact>(); |
837 | } |
838 | #endif |
839 | TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) { |
840 | TestSizeClassAllocatorLocalCache<Allocator64VeryCompact>(); |
841 | } |
842 | #endif |
843 | #endif |
844 | |
845 | TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) { |
846 | TestSizeClassAllocatorLocalCache<Allocator32Compact>(); |
847 | } |
848 | |
849 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
850 | typedef Allocator64::AllocatorCache AllocatorCache; |
851 | static AllocatorCache static_allocator_cache; |
852 | |
853 | void *AllocatorLeakTestWorker(void *arg) { |
854 | typedef AllocatorCache::Allocator Allocator; |
855 | Allocator *a = (Allocator*)(arg); |
static_allocator_cache.Allocate(a, 10);
static_allocator_cache.Drain(a);
858 | return 0; |
859 | } |
860 | |
861 | TEST(SanitizerCommon, AllocatorLeakTest) { |
862 | typedef AllocatorCache::Allocator Allocator; |
863 | Allocator a; |
a.Init(kReleaseToOSIntervalNever);
865 | uptr total_used_memory = 0; |
866 | for (int i = 0; i < 100; i++) { |
867 | pthread_t t; |
868 | PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a); |
869 | PTHREAD_JOIN(t, 0); |
870 | if (i == 0) |
871 | total_used_memory = a.TotalMemoryUsed(); |
872 | EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory); |
873 | } |
874 | |
875 | a.TestOnlyUnmap(); |
876 | } |
877 | |
878 | // Struct which is allocated to pass info to new threads. The new thread frees |
879 | // it. |
880 | struct NewThreadParams { |
881 | AllocatorCache *thread_cache; |
882 | AllocatorCache::Allocator *allocator; |
883 | uptr class_id; |
884 | }; |
885 | |
886 | // Called in a new thread. Just frees its argument. |
887 | static void *DeallocNewThreadWorker(void *arg) { |
888 | NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg); |
params->thread_cache->Deallocate(params->allocator, params->class_id, params);
890 | return NULL; |
891 | } |
892 | |
893 | // The allocator cache is supposed to be POD and zero initialized. We should be |
894 | // able to call Deallocate on a zeroed cache, and it will self-initialize. |
895 | TEST(Allocator, AllocatorCacheDeallocNewThread) { |
896 | AllocatorCache::Allocator allocator; |
allocator.Init(kReleaseToOSIntervalNever);
898 | AllocatorCache main_cache; |
899 | AllocatorCache child_cache; |
900 | memset(&main_cache, 0, sizeof(main_cache)); |
901 | memset(&child_cache, 0, sizeof(child_cache)); |
902 | |
uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
main_cache.Allocate(&allocator, class_id));
906 | params->thread_cache = &child_cache; |
907 | params->allocator = &allocator; |
908 | params->class_id = class_id; |
909 | pthread_t t; |
910 | PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params); |
911 | PTHREAD_JOIN(t, 0); |
912 | |
913 | allocator.TestOnlyUnmap(); |
914 | } |
915 | #endif |
916 | |
917 | TEST(Allocator, Basic) { |
char *p = (char*)InternalAlloc(10);
EXPECT_NE(p, (char*)0);
char *p2 = (char*)InternalAlloc(20);
EXPECT_NE(p2, (char*)0);
EXPECT_NE(p2, p);
InternalFree(p);
InternalFree(p2);
925 | } |
926 | |
927 | TEST(Allocator, Stress) { |
928 | const int kCount = 1000; |
929 | char *ptrs[kCount]; |
930 | unsigned rnd = 42; |
931 | for (int i = 0; i < kCount; i++) { |
uptr sz = my_rand_r(&rnd) % 1000;
char *p = (char*)InternalAlloc(sz);
934 | EXPECT_NE(p, (char*)0); |
935 | ptrs[i] = p; |
936 | } |
937 | for (int i = 0; i < kCount; i++) { |
InternalFree(ptrs[i]);
939 | } |
940 | } |
941 | |
942 | TEST(Allocator, LargeAlloc) { |
void *p = InternalAlloc(10 << 20);
944 | InternalFree(p); |
945 | } |
946 | |
947 | TEST(Allocator, ScopedBuffer) { |
948 | const int kSize = 512; |
949 | { |
950 | InternalMmapVector<int> int_buf(kSize); |
951 | EXPECT_EQ((uptr)kSize, int_buf.size()); |
952 | } |
953 | InternalMmapVector<char> char_buf(kSize); |
954 | EXPECT_EQ((uptr)kSize, char_buf.size()); |
internal_memset(char_buf.data(), 'c', kSize);
956 | for (int i = 0; i < kSize; i++) { |
957 | EXPECT_EQ('c', char_buf[i]); |
958 | } |
959 | } |
960 | |
961 | void IterationTestCallback(uptr chunk, void *arg) { |
962 | reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk); |
963 | } |
964 | |
965 | template <class Allocator> |
966 | void TestSizeClassAllocatorIteration(uptr premapped_heap = 0) { |
967 | Allocator *a = new Allocator; |
968 | a->Init(kReleaseToOSIntervalNever, premapped_heap); |
969 | typename Allocator::AllocatorCache cache; |
970 | memset(&cache, 0, sizeof(cache)); |
971 | cache.Init(0); |
972 | |
973 | static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000, |
974 | 50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000}; |
975 | |
976 | std::vector<void *> allocated; |
977 | |
978 | // Allocate a bunch of chunks. |
979 | for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) { |
980 | uptr size = sizes[s]; |
981 | if (!a->CanAllocate(size, 1)) continue; |
982 | // printf("s = %ld\n", size); |
983 | uptr n_iter = std::max((uptr)6, 80000 / size); |
984 | // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter); |
985 | for (uptr j = 0; j < n_iter; j++) { |
986 | uptr class_id0 = Allocator::SizeClassMapT::ClassID(size); |
987 | void *x = cache.Allocate(a, class_id0); |
988 | allocated.push_back(x); |
989 | } |
990 | } |
991 | |
992 | std::set<uptr> reported_chunks; |
993 | a->ForceLock(); |
994 | a->ForEachChunk(IterationTestCallback, &reported_chunks); |
995 | a->ForceUnlock(); |
996 | |
997 | for (uptr i = 0; i < allocated.size(); i++) { |
998 | // Don't use EXPECT_NE. Reporting the first mismatch is enough. |
999 | ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])), |
1000 | reported_chunks.end()); |
1001 | } |
1002 | |
1003 | a->TestOnlyUnmap(); |
1004 | delete a; |
1005 | } |
1006 | |
1007 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
1008 | // These tests can fail on Windows if memory is somewhat full and lit happens |
1009 | // to run them all at the same time. FIXME: Make them not flaky and reenable. |
1010 | #if !SANITIZER_WINDOWS |
1011 | TEST(SanitizerCommon, SizeClassAllocator64Iteration) { |
1012 | TestSizeClassAllocatorIteration<Allocator64>(); |
1013 | } |
1014 | TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) { |
1015 | TestSizeClassAllocatorIteration<Allocator64Dynamic>(); |
1016 | } |
1017 | #if !ALLOCATOR64_SMALL_SIZE |
1018 | TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedIteration) { |
1019 | ScopedPremappedHeap h; |
TestSizeClassAllocatorIteration<Allocator64Dynamic>(h.Addr());
1021 | } |
1022 | #endif |
1023 | #endif |
1024 | #endif |
1025 | |
1026 | TEST(SanitizerCommon, SKIP_ON_SOLARIS_SPARCV9(SizeClassAllocator32Iteration)) { |
1027 | TestSizeClassAllocatorIteration<Allocator32Compact>(); |
1028 | } |
1029 | |
1030 | TEST(SanitizerCommon, LargeMmapAllocatorIteration) { |
1031 | LargeMmapAllocator<NoOpMapUnmapCallback> a; |
1032 | a.Init(); |
1033 | AllocatorStats stats; |
1034 | stats.Init(); |
1035 | |
1036 | static const uptr kNumAllocs = 1000; |
1037 | char *allocated[kNumAllocs]; |
1038 | static const uptr size = 40; |
1039 | // Allocate some. |
1040 | for (uptr i = 0; i < kNumAllocs; i++) |
allocated[i] = (char *)a.Allocate(&stats, size, 1);
1042 | |
1043 | std::set<uptr> reported_chunks; |
1044 | a.ForceLock(); |
1045 | a.ForEachChunk(IterationTestCallback, &reported_chunks); |
1046 | a.ForceUnlock(); |
1047 | |
1048 | for (uptr i = 0; i < kNumAllocs; i++) { |
1049 | // Don't use EXPECT_NE. Reporting the first mismatch is enough. |
1050 | ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])), |
1051 | reported_chunks.end()); |
1052 | } |
1053 | for (uptr i = 0; i < kNumAllocs; i++) |
a.Deallocate(&stats, allocated[i]);
1055 | } |
1056 | |
1057 | TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) { |
1058 | LargeMmapAllocator<NoOpMapUnmapCallback> a; |
1059 | a.Init(); |
1060 | AllocatorStats stats; |
1061 | stats.Init(); |
1062 | |
1063 | static const uptr kNumAllocs = 1024; |
1064 | static const uptr kNumExpectedFalseLookups = 10000000; |
1065 | char *allocated[kNumAllocs]; |
1066 | static const uptr size = 4096; |
1067 | // Allocate some. |
1068 | for (uptr i = 0; i < kNumAllocs; i++) { |
allocated[i] = (char *)a.Allocate(&stats, size, 1);
1070 | } |
1071 | |
1072 | a.ForceLock(); |
1073 | for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) { |
1074 | // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i); |
1075 | char *p1 = allocated[i % kNumAllocs]; |
EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
1080 | } |
1081 | |
1082 | for (uptr i = 0; i < kNumExpectedFalseLookups; i++) { |
1083 | void *p = reinterpret_cast<void *>(i % 1024); |
EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
p = reinterpret_cast<void *>(~0L - (i % 1024));
EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
1087 | } |
1088 | a.ForceUnlock(); |
1089 | |
1090 | for (uptr i = 0; i < kNumAllocs; i++) |
a.Deallocate(&stats, allocated[i]);
1092 | } |
1093 | |
1094 | |
1095 | // Don't test OOM conditions on Win64 because it causes other tests on the same |
1096 | // machine to OOM. |
1097 | #if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !ALLOCATOR64_SMALL_SIZE |
1098 | typedef __sanitizer::SizeClassMap<2, 22, 22, 34, 128, 16> SpecialSizeClassMap; |
1099 | template <typename AddressSpaceViewTy = LocalAddressSpaceView> |
1100 | struct AP64_SpecialSizeClassMap { |
1101 | static const uptr kSpaceBeg = kAllocatorSpace; |
1102 | static const uptr kSpaceSize = kAllocatorSize; |
1103 | static const uptr kMetadataSize = 0; |
1104 | typedef SpecialSizeClassMap SizeClassMap; |
1105 | typedef NoOpMapUnmapCallback MapUnmapCallback; |
1106 | static const uptr kFlags = 0; |
1107 | using AddressSpaceView = AddressSpaceViewTy; |
1108 | }; |
1109 | |
1110 | // Regression test for out-of-memory condition in PopulateFreeList(). |
1111 | TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) { |
1112 | // In a world where regions are small and chunks are huge... |
1113 | typedef SizeClassAllocator64<AP64_SpecialSizeClassMap<>> SpecialAllocator64; |
1114 | const uptr kRegionSize = |
1115 | kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded; |
1116 | SpecialAllocator64 *a = new SpecialAllocator64; |
a->Init(kReleaseToOSIntervalNever);
SpecialAllocator64::AllocatorCache cache;
memset(&cache, 0, sizeof(cache));
cache.Init(0);
1121 | |
1122 | // ...one man is on a mission to overflow a region with a series of |
1123 | // successive allocations. |
1124 | |
1125 | const uptr kClassID = 24; |
const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
ASSERT_LT(2 * kAllocationSize, kRegionSize);
ASSERT_GT(3 * kAllocationSize, kRegionSize);
EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);
1132 | |
1133 | const uptr Class2 = 21; |
const uptr Size2 = SpecialSizeClassMap::Size(Class2);
1135 | ASSERT_EQ(Size2 * 8, kRegionSize); |
1136 | char *p[7]; |
1137 | for (int i = 0; i < 7; i++) { |
p[i] = (char*)cache.Allocate(a, Class2);
EXPECT_NE(p[i], nullptr);
fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
1141 | p[i][Size2 - 1] = 42; |
1142 | if (i) ASSERT_LT(p[i - 1], p[i]); |
1143 | } |
EXPECT_EQ(cache.Allocate(a, Class2), nullptr);
cache.Deallocate(a, Class2, p[0]);
cache.Drain(a);
1147 | ASSERT_EQ(p[6][Size2 - 1], 42); |
1148 | a->TestOnlyUnmap(); |
1149 | delete a; |
1150 | } |
1151 | |
1152 | #endif |
1153 | |
1154 | #if SANITIZER_CAN_USE_ALLOCATOR64 |
1155 | |
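// Memory mapper that only records the size of the last requested counter
// buffer and never maps anything; used to check PackedCounterArray's sizing.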
1156 | class NoMemoryMapper { |
1157 | public: |
1158 | uptr last_request_buffer_size = 0; |
1159 | |
1160 | u64 *MapPackedCounterArrayBuffer(uptr buffer_size) { |
1161 | last_request_buffer_size = buffer_size * sizeof(u64); |
1162 | return nullptr; |
1163 | } |
1164 | }; |
1165 | |
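// Memory mapper that hands out a single page surrounded by PROT_NONE guard
// pages, so any out-of-bounds access to the counter buffer faults immediately.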
1166 | class RedZoneMemoryMapper { |
1167 | public: |
1168 | RedZoneMemoryMapper() { |
1169 | const auto page_size = GetPageSize(); |
buffer = MmapOrDie(3ULL * page_size, "");
MprotectNoAccess(reinterpret_cast<uptr>(buffer), page_size);
MprotectNoAccess(reinterpret_cast<uptr>(buffer) + page_size * 2, page_size);
}
~RedZoneMemoryMapper() { UnmapOrDie(buffer, 3 * GetPageSize()); }
1175 | |
1176 | u64 *MapPackedCounterArrayBuffer(uptr buffer_size) { |
1177 | buffer_size *= sizeof(u64); |
1178 | const auto page_size = GetPageSize(); |
1179 | CHECK_EQ(buffer_size, page_size); |
1180 | u64 *p = |
1181 | reinterpret_cast<u64 *>(reinterpret_cast<uptr>(buffer) + page_size); |
1182 | memset(p, 0, page_size); |
1183 | return p; |
1184 | } |
1185 | |
1186 | private: |
1187 | void *buffer; |
1188 | }; |
1189 | |
1190 | TEST(SanitizerCommon, SizeClassAllocator64PackedCounterArray) { |
1191 | NoMemoryMapper no_memory_mapper; |
1192 | for (int i = 0; i < 64; i++) { |
// Various valid max counter values packed into one word.
1194 | Allocator64::PackedCounterArray counters_2n(1, 1ULL << i, |
1195 | &no_memory_mapper); |
1196 | EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size); |
1197 | |
1198 | // Check the "all bit set" values too. |
1199 | Allocator64::PackedCounterArray counters_2n1_1(1, ~0ULL >> i, |
1200 | &no_memory_mapper); |
1201 | EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size); |
1202 | |
1203 | // Verify the packing ratio, the counter is expected to be packed into the |
1204 | // closest power of 2 bits. |
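// (For example, a max value of 1 << 4 needs 5 bits and is therefore stored
// in 8 bits, so 64 such counters occupy 64 bytes.)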
1205 | Allocator64::PackedCounterArray counters(64, 1ULL << i, &no_memory_mapper); |
EXPECT_EQ(8ULL * RoundUpToPowerOfTwo(i + 1),
1207 | no_memory_mapper.last_request_buffer_size); |
1208 | } |
1209 | |
1210 | RedZoneMemoryMapper memory_mapper; |
1211 | // Go through 1, 2, 4, 8, .. 64 bits per counter. |
1212 | for (int i = 0; i < 7; i++) { |
1213 | // Make sure counters request one memory page for the buffer. |
1214 | const u64 kNumCounters = (GetPageSize() / 8) * (64 >> i); |
1215 | Allocator64::PackedCounterArray counters( |
1216 | kNumCounters, 1ULL << ((1 << i) - 1), &memory_mapper); |
counters.Inc(0);
for (u64 c = 1; c < kNumCounters - 1; c++) {
ASSERT_EQ(0ULL, counters.Get(c));
counters.Inc(c);
ASSERT_EQ(1ULL, counters.Get(c - 1));
}
ASSERT_EQ(0ULL, counters.Get(kNumCounters - 1));
counters.Inc(kNumCounters - 1);
1225 | |
1226 | if (i > 0) { |
counters.IncRange(0, kNumCounters - 1);
for (u64 c = 0; c < kNumCounters; c++)
ASSERT_EQ(2ULL, counters.Get(c));
1230 | } |
1231 | } |
1232 | } |
1233 | |
1234 | class RangeRecorder { |
1235 | public: |
1236 | std::string reported_pages; |
1237 | |
1238 | RangeRecorder() |
1239 | : page_size_scaled_log( |
Log2(GetPageSizeCached() >> Allocator64::kCompactPtrScale)),
1241 | last_page_reported(0) {} |
1242 | |
void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {
1244 | from >>= page_size_scaled_log; |
1245 | to >>= page_size_scaled_log; |
1246 | ASSERT_LT(from, to); |
1247 | if (!reported_pages.empty()) |
1248 | ASSERT_LT(last_page_reported, from); |
1249 | reported_pages.append(from - last_page_reported, '.'); |
1250 | reported_pages.append(to - from, 'x'); |
1251 | last_page_reported = to; |
1252 | } |
1253 | |
1254 | private: |
1255 | const uptr page_size_scaled_log; |
1256 | u32 last_page_reported; |
1257 | }; |
1258 | |
1259 | TEST(SanitizerCommon, SizeClassAllocator64FreePagesRangeTracker) { |
1260 | typedef Allocator64::FreePagesRangeTracker<RangeRecorder> RangeTracker; |
1261 | |
1262 | // 'x' denotes a page to be released, '.' denotes a page to be kept around. |
1263 | const char* test_cases[] = { |
1264 | "" , |
1265 | "." , |
1266 | "x" , |
1267 | "........" , |
1268 | "xxxxxxxxxxx" , |
1269 | "..............xxxxx" , |
1270 | "xxxxxxxxxxxxxxxxxx....." , |
1271 | "......xxxxxxxx........" , |
1272 | "xxx..........xxxxxxxxxxxxxxx" , |
1273 | "......xxxx....xxxx........" , |
1274 | "xxx..........xxxxxxxx....xxxxxxx" , |
1275 | "x.x.x.x.x.x.x.x.x.x.x.x." , |
1276 | ".x.x.x.x.x.x.x.x.x.x.x.x" , |
1277 | ".x.x.x.x.x.x.x.x.x.x.x.x." , |
1278 | "x.x.x.x.x.x.x.x.x.x.x.x.x" , |
1279 | }; |
1280 | |
1281 | for (auto test_case : test_cases) { |
1282 | RangeRecorder range_recorder; |
1283 | RangeTracker tracker(&range_recorder, 1); |
1284 | for (int i = 0; test_case[i] != 0; i++) |
tracker.NextPage(test_case[i] == 'x');
1286 | tracker.Done(); |
1287 | // Strip trailing '.'-pages before comparing the results as they are not |
1288 | // going to be reported to range_recorder anyway. |
1289 | const char* last_x = strrchr(test_case, 'x'); |
1290 | std::string expected( |
1291 | test_case, |
1292 | last_x == nullptr ? 0 : (last_x - test_case + 1)); |
1293 | EXPECT_STREQ(expected.c_str(), range_recorder.reported_pages.c_str()); |
1294 | } |
1295 | } |
1296 | |
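// Memory mapper that records which pages ReleaseFreeMemoryToOS reports as
// released, so the test below can cross-check them against the free list.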
1297 | class ReleasedPagesTrackingMemoryMapper { |
1298 | public: |
1299 | std::set<u32> reported_pages; |
1300 | std::vector<u64> buffer; |
1301 | |
1302 | u64 *MapPackedCounterArrayBuffer(uptr buffer_size) { |
1303 | reported_pages.clear(); |
1304 | buffer.assign(buffer_size, 0); |
1305 | return buffer.data(); |
1306 | } |
void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {
1308 | uptr page_size_scaled = |
1309 | GetPageSizeCached() >> Allocator64::kCompactPtrScale; |
1310 | for (u32 i = from; i < to; i += page_size_scaled) |
1311 | reported_pages.insert(i); |
1312 | } |
1313 | }; |
1314 | |
1315 | template <class Allocator> |
1316 | void TestReleaseFreeMemoryToOS() { |
1317 | ReleasedPagesTrackingMemoryMapper memory_mapper; |
1318 | const uptr kAllocatedPagesCount = 1024; |
1319 | const uptr page_size = GetPageSizeCached(); |
1320 | const uptr page_size_scaled = page_size >> Allocator::kCompactPtrScale; |
1321 | std::mt19937 r; |
1322 | uint32_t rnd_state = 42; |
1323 | |
1324 | for (uptr class_id = 1; class_id <= Allocator::SizeClassMapT::kLargestClassID; |
1325 | class_id++) { |
1326 | const uptr chunk_size = Allocator::SizeClassMapT::Size(class_id); |
1327 | const uptr chunk_size_scaled = chunk_size >> Allocator::kCompactPtrScale; |
1328 | const uptr max_chunks = |
1329 | kAllocatedPagesCount * GetPageSizeCached() / chunk_size; |
1330 | |
1331 | // Generate the random free list. |
1332 | std::vector<u32> free_array; |
1333 | bool in_free_range = false; |
1334 | uptr current_range_end = 0; |
1335 | for (uptr i = 0; i < max_chunks; i++) { |
1336 | if (i == current_range_end) { |
in_free_range = (my_rand_r(&rnd_state) & 1U) == 1;
current_range_end += my_rand_r(&rnd_state) % 100 + 1;
1339 | } |
1340 | if (in_free_range) |
1341 | free_array.push_back(i * chunk_size_scaled); |
1342 | } |
1343 | if (free_array.empty()) |
1344 | continue; |
1345 | // Shuffle free_list to verify that ReleaseFreeMemoryToOS does not depend on |
1346 | // the list ordering. |
1347 | std::shuffle(free_array.begin(), free_array.end(), r); |
1348 | |
1349 | Allocator::ReleaseFreeMemoryToOS(&free_array[0], free_array.size(), |
1350 | chunk_size, kAllocatedPagesCount, |
1351 | &memory_mapper, class_id); |
1352 | |
1353 | // Verify that there are no released pages touched by used chunks and all |
1354 | // ranges of free chunks big enough to contain the entire memory pages had |
1355 | // these pages released. |
1356 | uptr verified_released_pages = 0; |
1357 | std::set<u32> free_chunks(free_array.begin(), free_array.end()); |
1358 | |
1359 | u32 current_chunk = 0; |
1360 | in_free_range = false; |
1361 | u32 current_free_range_start = 0; |
1362 | for (uptr i = 0; i <= max_chunks; i++) { |
1363 | bool is_free_chunk = free_chunks.find(current_chunk) != free_chunks.end(); |
1364 | |
1365 | if (is_free_chunk) { |
1366 | if (!in_free_range) { |
1367 | in_free_range = true; |
1368 | current_free_range_start = current_chunk; |
1369 | } |
1370 | } else { |
1371 | // Verify that this used chunk does not touch any released page. |
1372 | for (uptr i_page = current_chunk / page_size_scaled; |
1373 | i_page <= (current_chunk + chunk_size_scaled - 1) / |
1374 | page_size_scaled; |
1375 | i_page++) { |
1376 | bool page_released = |
1377 | memory_mapper.reported_pages.find(i_page * page_size_scaled) != |
1378 | memory_mapper.reported_pages.end(); |
1379 | ASSERT_EQ(false, page_released); |
1380 | } |
1381 | |
1382 | if (in_free_range) { |
1383 | in_free_range = false; |
1384 | // Verify that all entire memory pages covered by this range of free |
1385 | // chunks were released. |
u32 page = RoundUpTo(current_free_range_start, page_size_scaled);
1387 | while (page + page_size_scaled <= current_chunk) { |
1388 | bool page_released = |
1389 | memory_mapper.reported_pages.find(page) != |
1390 | memory_mapper.reported_pages.end(); |
1391 | ASSERT_EQ(true, page_released); |
1392 | verified_released_pages++; |
1393 | page += page_size_scaled; |
1394 | } |
1395 | } |
1396 | } |
1397 | |
1398 | current_chunk += chunk_size_scaled; |
1399 | } |
1400 | |
1401 | ASSERT_EQ(memory_mapper.reported_pages.size(), verified_released_pages); |
1402 | } |
1403 | } |
1404 | |
1405 | TEST(SanitizerCommon, SizeClassAllocator64ReleaseFreeMemoryToOS) { |
1406 | TestReleaseFreeMemoryToOS<Allocator64>(); |
1407 | } |
1408 | |
1409 | #if !ALLOCATOR64_SMALL_SIZE |
1410 | TEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) { |
1411 | TestReleaseFreeMemoryToOS<Allocator64Compact>(); |
1412 | } |
1413 | |
1414 | TEST(SanitizerCommon, SizeClassAllocator64VeryCompactReleaseFreeMemoryToOS) { |
1415 | TestReleaseFreeMemoryToOS<Allocator64VeryCompact>(); |
1416 | } |
1417 | #endif // !ALLOCATOR64_SMALL_SIZE |
1418 | |
1419 | #endif // SANITIZER_CAN_USE_ALLOCATOR64 |
1420 | |
1421 | TEST(SanitizerCommon, LowLevelAllocatorShouldRoundUpSizeOnAlloc) { |
1422 | // When allocating a memory block slightly bigger than a memory page and |
1423 | // LowLevelAllocator calls MmapOrDie for the internal buffer, it should round |
1424 | // the size up to the page size, so that subsequent calls to the allocator |
1425 | // can use the remaining space in the last allocated page. |
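// (If only the requested size were recorded as available, the second
// allocation below would come from a fresh mapping and would most likely not
// be adjacent to the first one.)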
1426 | static LowLevelAllocator allocator; |
char *ptr1 = (char *)allocator.Allocate(GetPageSizeCached() + 16);
char *ptr2 = (char *)allocator.Allocate(16);
1429 | EXPECT_EQ(ptr2, ptr1 + GetPageSizeCached() + 16); |
1430 | } |
1431 | |
1432 | #endif // #if !SANITIZER_DEBUG |
1433 | |