//===-- primary_test.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "allocator_config_wrapper.h"
#include "condition_variable.h"
#include "primary32.h"
#include "primary64.h"
#include "size_class_map.h"

#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <random>
#include <stdlib.h>
#include <thread>
#include <vector>
// Note that with small enough regions, the SizeClassAllocator64 also works on
// 32-bit architectures. It's not something we want to encourage, but we still
// should ensure the tests pass.

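// Each config below is consumed through scudo::PrimaryConfig, which reads the
// MaySupportMemoryTagging flag and the nested Primary parameters. The
// top-level TSDRegistryT/PrimaryT/SecondaryT aliases are void placeholders:
// only the primary allocator is under test here.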
template <typename SizeClassMapT> struct TestConfig1 {
  static const bool MaySupportMemoryTagging = false;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename> using SecondaryT = void;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
    static const scudo::uptr RegionSizeLog = 18U;
    static const scudo::uptr GroupSizeLog = 18U;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
  };
};

template <typename SizeClassMapT> struct TestConfig2 {
  static const bool MaySupportMemoryTagging = false;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename> using SecondaryT = void;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
#if defined(__mips__)
    // Unable to allocate a larger region size on QEMU-user.
    static const scudo::uptr RegionSizeLog = 23U;
#else
    static const scudo::uptr RegionSizeLog = 24U;
#endif
    static const scudo::uptr GroupSizeLog = 20U;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
  };
};

template <typename SizeClassMapT> struct TestConfig3 {
  static const bool MaySupportMemoryTagging = true;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename> using SecondaryT = void;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
#if defined(__mips__)
    // Unable to allocate a larger region size on QEMU-user.
    static const scudo::uptr RegionSizeLog = 23U;
#else
    static const scudo::uptr RegionSizeLog = 24U;
#endif
    static const scudo::uptr GroupSizeLog = 20U;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableContiguousRegions = false;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
  };
};

template <typename SizeClassMapT> struct TestConfig4 {
  static const bool MaySupportMemoryTagging = true;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename> using SecondaryT = void;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
#if defined(__mips__)
    // Unable to allocate a larger region size on QEMU-user.
    static const scudo::uptr RegionSizeLog = 23U;
#else
    static const scudo::uptr RegionSizeLog = 24U;
#endif
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    static const scudo::uptr CompactPtrScale = 3U;
    static const scudo::uptr GroupSizeLog = 20U;
    typedef scudo::u32 CompactPtrT;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
  };
};

// This is the only test config that enables the condition variable.
template <typename SizeClassMapT> struct TestConfig5 {
  static const bool MaySupportMemoryTagging = true;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename> using SecondaryT = void;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
#if defined(__mips__)
    // Unable to allocate a larger region size on QEMU-user.
    static const scudo::uptr RegionSizeLog = 23U;
#else
    static const scudo::uptr RegionSizeLog = 24U;
#endif
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
    static const scudo::uptr GroupSizeLog = 18U;
    typedef scudo::u32 CompactPtrT;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
#if SCUDO_LINUX
    using ConditionVariableT = scudo::ConditionVariableLinux;
#else
    using ConditionVariableT = scudo::ConditionVariableDummy;
#endif
  };
};

template <template <typename> class BaseConfig, typename SizeClassMapT>
struct Config : public BaseConfig<SizeClassMapT> {};

template <template <typename> class BaseConfig, typename SizeClassMapT>
struct SizeClassAllocator
    : public scudo::SizeClassAllocator64<
          scudo::PrimaryConfig<Config<BaseConfig, SizeClassMapT>>> {};
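// TestConfig1 is the only config routed to the 32-bit primary, via the partial
// specialization below; the primary template above selects the 64-bit one.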
template <typename SizeClassMapT>
struct SizeClassAllocator<TestConfig1, SizeClassMapT>
    : public scudo::SizeClassAllocator32<
          scudo::PrimaryConfig<Config<TestConfig1, SizeClassMapT>>> {};

template <template <typename> class BaseConfig, typename SizeClassMapT>
struct TestAllocator : public SizeClassAllocator<BaseConfig, SizeClassMapT> {
  ~TestAllocator() {
    this->verifyAllBlocksAreReleasedTestOnly();
    this->unmapTestOnly();
  }

  void *operator new(size_t size) {
    void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, alignof(TestAllocator), size));
    return p;
  }

  void operator delete(void *ptr) { free(ptr); }
};

template <template <typename> class BaseConfig>
struct ScudoPrimaryTest : public Test {};

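// The macros below emulate gtest's TYPED_TEST for fixtures parameterized by a
// template-template config: SCUDO_TYPED_TEST defines the test body as a Run()
// method, and SCUDO_TYPED_TEST_ALL_TYPES stamps out one TEST_F per config.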
#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig1)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig4)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig5)
#endif

#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<TYPE>;                          \
  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<TYPE>::Run(); }

#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
  template <template <typename> class TypeParam>                               \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    void Run();                                                                \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <template <typename> class TypeParam>                               \
  void FIXTURE##NAME<TypeParam>::Run()

SCUDO_TYPED_TEST(ScudoPrimaryTest, BasicPrimary) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  const scudo::uptr NumberOfAllocations = 32U;
  for (scudo::uptr I = 0; I <= 16U; I++) {
    const scudo::uptr Size = 1UL << I;
    if (!Primary::canAllocate(Size))
      continue;
    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
    void *Pointers[NumberOfAllocations];
    for (scudo::uptr J = 0; J < NumberOfAllocations; J++) {
      void *P = Cache.allocate(ClassId);
      memset(P, 'B', Size);
      Pointers[J] = P;
    }
    for (scudo::uptr J = 0; J < NumberOfAllocations; J++)
      Cache.deallocate(ClassId, Pointers[J]);
  }
  Cache.destroy(nullptr);
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

struct SmallRegionsConfig {
  static const bool MaySupportMemoryTagging = false;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename> using SecondaryT = void;

  struct Primary {
    using SizeClassMap = scudo::DefaultSizeClassMap;
    static const scudo::uptr RegionSizeLog = 21U;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
    static const scudo::uptr GroupSizeLog = 20U;
  };
};

// The 64-bit SizeClassAllocator can be easily OOM'd with small region sizes.
// For the 32-bit one, it requires actually exhausting memory, so we skip it.
TEST(ScudoPrimaryTest, Primary64OOM) {
  using Primary =
      scudo::SizeClassAllocator64<scudo::PrimaryConfig<SmallRegionsConfig>>;
  Primary Allocator;
  Allocator.init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  scudo::GlobalStats Stats;
  Stats.init();
  Cache.init(&Stats, &Allocator);
  bool AllocationFailed = false;
  std::vector<void *> Blocks;
  const scudo::uptr ClassId = Primary::SizeClassMap::LargestClassId;
  const scudo::uptr Size = Primary::getSizeByClassId(ClassId);
  const scudo::u16 MaxCachedBlockCount = Primary::CacheT::getMaxCached(Size);

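  // Allocate repeatedly without freeing: with such a small region, the largest
  // size class must eventually exhaust its backing memory, at which point
  // allocate() is expected to return nullptr.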
  for (scudo::uptr I = 0; I < 10000U; I++) {
    for (scudo::uptr J = 0; J < MaxCachedBlockCount; ++J) {
      void *Ptr = Cache.allocate(ClassId);
      if (Ptr == nullptr) {
        AllocationFailed = true;
        break;
      }
      memset(Ptr, 'B', Size);
      Blocks.push_back(Ptr);
    }
  }

  for (auto *Ptr : Blocks)
    Cache.deallocate(ClassId, Ptr);

  Cache.destroy(nullptr);
  Allocator.releaseToOS(scudo::ReleaseToOS::Force);
  scudo::ScopedString Str;
  Allocator.getStats(&Str);
  Str.output();
  EXPECT_EQ(AllocationFailed, true);
  Allocator.unmapTestOnly();
}

SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryIterate) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  std::vector<std::pair<scudo::uptr, void *>> V;
  for (scudo::uptr I = 0; I < 64U; I++) {
    const scudo::uptr Size =
        static_cast<scudo::uptr>(std::rand()) % Primary::SizeClassMap::MaxSize;
    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
    void *P = Cache.allocate(ClassId);
    V.push_back(std::make_pair(ClassId, P));
  }
  scudo::uptr Found = 0;
  auto Lambda = [&V, &Found](scudo::uptr Block) {
    for (const auto &Pair : V) {
      if (Pair.second == reinterpret_cast<void *>(Block))
        Found++;
    }
  };
  Allocator->disable();
  Allocator->iterateOverBlocks(Lambda);
  Allocator->enable();
  EXPECT_EQ(Found, V.size());
  while (!V.empty()) {
    auto Pair = V.back();
    Cache.deallocate(Pair.first, Pair.second);
    V.pop_back();
  }
  Cache.destroy(nullptr);
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

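// Stress the primary from 32 threads at once, interleaving allocations,
// deallocations, cache drains and releaseToOS() calls.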
SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
  using Primary =
      TestAllocator<TypeParam, scudo::Config::Primary::SizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++) {
    Threads[I] = std::thread([&]() {
      static thread_local typename Primary::CacheT Cache;
      Cache.init(nullptr, Allocator.get());
      std::vector<std::pair<scudo::uptr, void *>> V;
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) %
                                 Primary::SizeClassMap::MaxSize / 4;
        const scudo::uptr ClassId =
            Primary::SizeClassMap::getClassIdBySize(Size);
        void *P = Cache.allocate(ClassId);
        if (P)
          V.push_back(std::make_pair(ClassId, P));
      }

      // Try to interleave pushBlocks(), popBlocks() and releaseToOS().
      Allocator->releaseToOS(scudo::ReleaseToOS::Force);

      while (!V.empty()) {
        auto Pair = V.back();
        Cache.deallocate(Pair.first, Pair.second);
        V.pop_back();
        // Draining at random increases the chance of creating non-full
        // TransferBatches, exercising the code path that merges
        // TransferBatches.
        if (std::rand() % 8 == 0)
          Cache.drain();
      }
      Cache.destroy(nullptr);
    });
  }
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Allocator->getFragmentationInfo(&Str);
  Str.output();
}

// Through a simple allocation that spans two pages, verify that releaseToOS
// actually releases some bytes (at least one page worth). This is a regression
// test for an error in how the release criteria were computed.
SCUDO_TYPED_TEST(ScudoPrimaryTest, ReleaseToOS) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  const scudo::uptr Size = scudo::getPageSizeCached() * 2;
  EXPECT_TRUE(Primary::canAllocate(Size));
  const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
  void *P = Cache.allocate(ClassId);
  EXPECT_NE(P, nullptr);
  Cache.deallocate(ClassId, P);
  Cache.destroy(nullptr);
  EXPECT_GT(Allocator->releaseToOS(scudo::ReleaseToOS::ForceAll), 0U);
}

SCUDO_TYPED_TEST(ScudoPrimaryTest, MemoryGroup) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  const scudo::uptr Size = 32U;
  const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);

  // Allocate 4 times the group size worth of memory, then release all of it.
  // The freed blocks should be classified into groups. Next, allocate one
  // group size worth of memory and expect the maximum address difference
  // between the returned blocks to be at most twice the group size. The blocks
  // are not required to fall within a single group: the group id is derived by
  // shifting the compact pointer, which, depending on the configuration, may
  // not be aligned to the group size. As a result, the blocks can span at most
  // two groups.
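  // Sketch of the grouping assumed above (illustrative, not part of the test):
  // the group id is roughly CompactPtr >> (GroupSizeLog - CompactPtrScale), so
  // a contiguous run of blocks can map onto two adjacent group ids.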
  const scudo::uptr GroupSizeMem = (1ULL << Primary::GroupSizeLog);
  const scudo::uptr PeakAllocationMem = 4 * GroupSizeMem;
  const scudo::uptr PeakNumberOfAllocations = PeakAllocationMem / Size;
  const scudo::uptr FinalNumberOfAllocations = GroupSizeMem / Size;
  std::vector<scudo::uptr> Blocks;
  std::mt19937 R;

  for (scudo::uptr I = 0; I < PeakNumberOfAllocations; ++I)
    Blocks.push_back(reinterpret_cast<scudo::uptr>(Cache.allocate(ClassId)));

  std::shuffle(Blocks.begin(), Blocks.end(), R);

  // Release all the allocated blocks, including those held by the local cache.
  while (!Blocks.empty()) {
    Cache.deallocate(ClassId, reinterpret_cast<void *>(Blocks.back()));
    Blocks.pop_back();
  }
  Cache.drain();

  for (scudo::uptr I = 0; I < FinalNumberOfAllocations; ++I)
    Blocks.push_back(reinterpret_cast<scudo::uptr>(Cache.allocate(ClassId)));

  EXPECT_LE(*std::max_element(Blocks.begin(), Blocks.end()) -
                *std::min_element(Blocks.begin(), Blocks.end()),
            GroupSizeMem * 2);

  while (!Blocks.empty()) {
    Cache.deallocate(ClassId, reinterpret_cast<void *>(Blocks.back()));
    Blocks.pop_back();
  }
  Cache.drain();
}