//===-- primary_test.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "allocator_config_wrapper.h"
#include "condition_variable.h"
#include "primary32.h"
#include "primary64.h"
#include "size_class_map.h"

#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <random>
#include <stdlib.h>
#include <thread>
#include <vector>

// Note that with small enough regions, the SizeClassAllocator64 also works on
// 32-bit architectures. It's not something we want to encourage, but we still
// should ensure the tests pass.

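// TestConfig1 is the only configuration routed to SizeClassAllocator32; the
// SizeClassAllocator specialization further down maps it to the 32-bit
// primary, while every other config goes through SizeClassAllocator64.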
template <typename SizeClassMapT> struct TestConfig1 {
  static const bool MaySupportMemoryTagging = false;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename> using SecondaryT = void;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
    static const scudo::uptr RegionSizeLog = 18U;
    static const scudo::uptr GroupSizeLog = 18U;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
  };
};

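// TestConfig2 is the baseline 64-bit configuration; the following configs
// each vary one aspect of it (memory tagging, compact pointer encoding,
// condition variable).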
template <typename SizeClassMapT> struct TestConfig2 {
  static const bool MaySupportMemoryTagging = false;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename> using SecondaryT = void;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
#if defined(__mips__)
    // Unable to allocate greater size on QEMU-user.
    static const scudo::uptr RegionSizeLog = 23U;
#else
    static const scudo::uptr RegionSizeLog = 24U;
#endif
    static const scudo::uptr GroupSizeLog = 20U;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
  };
};

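// Like TestConfig2, but memory tagging may be enabled and contiguous regions
// are explicitly disabled.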
template <typename SizeClassMapT> struct TestConfig3 {
  static const bool MaySupportMemoryTagging = true;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename> using SecondaryT = void;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
#if defined(__mips__)
    // Unable to allocate greater size on QEMU-user.
    static const scudo::uptr RegionSizeLog = 23U;
#else
    static const scudo::uptr RegionSizeLog = 24U;
#endif
    static const scudo::uptr GroupSizeLog = 20U;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableContiguousRegions = false;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
  };
};

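// Exercises a narrower compact pointer: blocks are encoded as scudo::u32
// values scaled by 2^3, i.e. addressed in 8-byte granules.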
template <typename SizeClassMapT> struct TestConfig4 {
  static const bool MaySupportMemoryTagging = true;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename> using SecondaryT = void;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
#if defined(__mips__)
    // Unable to allocate greater size on QEMU-user.
    static const scudo::uptr RegionSizeLog = 23U;
#else
    static const scudo::uptr RegionSizeLog = 24U;
#endif
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    static const scudo::uptr CompactPtrScale = 3U;
    static const scudo::uptr GroupSizeLog = 20U;
    typedef scudo::u32 CompactPtrT;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
  };
};

// This is the only test config that enables the condition variable.
template <typename SizeClassMapT> struct TestConfig5 {
  static const bool MaySupportMemoryTagging = true;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename> using SecondaryT = void;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
#if defined(__mips__)
    // Unable to allocate greater size on QEMU-user.
    static const scudo::uptr RegionSizeLog = 23U;
#else
    static const scudo::uptr RegionSizeLog = 24U;
#endif
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
    static const scudo::uptr GroupSizeLog = 18U;
    typedef scudo::u32 CompactPtrT;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
#if SCUDO_LINUX
    using ConditionVariableT = scudo::ConditionVariableLinux;
#else
    using ConditionVariableT = scudo::ConditionVariableDummy;
#endif
  };
};

template <template <typename> class BaseConfig, typename SizeClassMapT>
struct Config : public BaseConfig<SizeClassMapT> {};

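// By default, a config is tested through the 64-bit primary; the
// specialization below routes TestConfig1 through the 32-bit one instead.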
template <template <typename> class BaseConfig, typename SizeClassMapT>
struct SizeClassAllocator
    : public scudo::SizeClassAllocator64<
          scudo::PrimaryConfig<Config<BaseConfig, SizeClassMapT>>> {};
template <typename SizeClassMapT>
struct SizeClassAllocator<TestConfig1, SizeClassMapT>
    : public scudo::SizeClassAllocator32<
          scudo::PrimaryConfig<Config<TestConfig1, SizeClassMapT>>> {};

template <template <typename> class BaseConfig, typename SizeClassMapT>
struct TestAllocator : public SizeClassAllocator<BaseConfig, SizeClassMapT> {
  ~TestAllocator() {
    this->verifyAllBlocksAreReleasedTestOnly();
    this->unmapTestOnly();
  }

  void *operator new(size_t size) {
    void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, alignof(TestAllocator), size));
    return p;
  }

  void operator delete(void *ptr) { free(ptr); }
};

template <template <typename> class BaseConfig>
struct ScudoPrimaryTest : public Test {};

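// The configs are template templates rather than plain types, so gtest's
// TYPED_TEST machinery can't be used directly. These macros emulate it by
// stamping out one TEST_F per (fixture, test, config) combination. Fuchsia
// only runs TestConfig2 and TestConfig3.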
#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig1)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig4)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig5)
#endif

#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<TYPE>;                          \
  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<TYPE>::Run(); }

#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
  template <template <typename> class TypeParam>                               \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    void Run();                                                                \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <template <typename> class TypeParam>                               \
  void FIXTURE##NAME<TypeParam>::Run()

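// For example, SCUDO_TYPED_TEST(ScudoPrimaryTest, BasicPrimary) below
// declares a ScudoPrimaryTestBasicPrimary<TypeParam> fixture and registers
// one TEST_F per config (ScudoPrimaryTestBasicPrimary_TestConfig2, ...),
// each of which calls the Run() body that follows the macro invocation.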
SCUDO_TYPED_TEST(ScudoPrimaryTest, BasicPrimary) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::SizeClassAllocatorT SizeClassAllocator;
  SizeClassAllocator.init(nullptr, Allocator.get());
  const scudo::uptr NumberOfAllocations = 32U;
  for (scudo::uptr I = 0; I <= 16U; I++) {
    const scudo::uptr Size = 1UL << I;
    if (!Primary::canAllocate(Size))
      continue;
    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
    void *Pointers[NumberOfAllocations];
    for (scudo::uptr J = 0; J < NumberOfAllocations; J++) {
      void *P = SizeClassAllocator.allocate(ClassId);
      memset(P, 'B', Size);
      Pointers[J] = P;
    }
    for (scudo::uptr J = 0; J < NumberOfAllocations; J++)
      SizeClassAllocator.deallocate(ClassId, Pointers[J]);
  }
  SizeClassAllocator.destroy(nullptr);
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

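// With RegionSizeLog = 21, each region is only 2 MB, which makes it cheap to
// exhaust the region backing the largest size class.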
struct SmallRegionsConfig {
  static const bool MaySupportMemoryTagging = false;
  template <typename> using TSDRegistryT = void;
  template <typename> using PrimaryT = void;
  template <typename> using SecondaryT = void;

  struct Primary {
    using SizeClassMap = scudo::DefaultSizeClassMap;
    static const scudo::uptr RegionSizeLog = 21U;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
    static const scudo::uptr GroupSizeLog = 20U;
  };
};

// The 64-bit SizeClassAllocator can be easily OOM'd with small region sizes.
// For the 32-bit one, it requires actually exhausting memory, so we skip it.
TEST(ScudoPrimaryTest, Primary64OOM) {
  using Primary =
      scudo::SizeClassAllocator64<scudo::PrimaryConfig<SmallRegionsConfig>>;
  Primary Allocator;
  Allocator.init(/*ReleaseToOsInterval=*/-1);
  typename Primary::SizeClassAllocatorT SizeClassAllocator;
  scudo::GlobalStats Stats;
  Stats.init();
  SizeClassAllocator.init(&Stats, &Allocator);
  bool AllocationFailed = false;
  std::vector<void *> Blocks;
  const scudo::uptr ClassId = Primary::SizeClassMap::LargestClassId;
  const scudo::uptr Size = Primary::getSizeByClassId(ClassId);
  const scudo::u16 MaxCachedBlockCount =
      Primary::SizeClassAllocatorT::getMaxCached(Size);

  for (scudo::uptr I = 0; I < 10000U; I++) {
    for (scudo::uptr J = 0; J < MaxCachedBlockCount; ++J) {
      void *Ptr = SizeClassAllocator.allocate(ClassId);
      if (Ptr == nullptr) {
        AllocationFailed = true;
        break;
      }
      memset(Ptr, 'B', Size);
      Blocks.push_back(Ptr);
    }
  }

  for (auto *Ptr : Blocks)
    SizeClassAllocator.deallocate(ClassId, Ptr);

  SizeClassAllocator.destroy(nullptr);
  Allocator.releaseToOS(scudo::ReleaseToOS::Force);
  scudo::ScopedString Str;
  Allocator.getStats(&Str);
  Str.output();
  EXPECT_EQ(AllocationFailed, true);
  Allocator.unmapTestOnly();
}

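// Allocate blocks across random size classes, then verify that
// iterateOverBlocks() reports every one of them while the allocator is
// paused by disable().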
SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryIterate) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::SizeClassAllocatorT SizeClassAllocator;
  SizeClassAllocator.init(nullptr, Allocator.get());
  std::vector<std::pair<scudo::uptr, void *>> V;
  for (scudo::uptr I = 0; I < 64U; I++) {
    const scudo::uptr Size =
        static_cast<scudo::uptr>(std::rand()) % Primary::SizeClassMap::MaxSize;
    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
    void *P = SizeClassAllocator.allocate(ClassId);
    V.push_back(std::make_pair(ClassId, P));
  }
  scudo::uptr Found = 0;
  auto Lambda = [&V, &Found](scudo::uptr Block) {
    for (const auto &Pair : V) {
      if (Pair.second == reinterpret_cast<void *>(Block))
        Found++;
    }
  };
  Allocator->disable();
  Allocator->iterateOverBlocks(Lambda);
  Allocator->enable();
  EXPECT_EQ(Found, V.size());
  while (!V.empty()) {
    auto Pair = V.back();
    SizeClassAllocator.deallocate(Pair.first, Pair.second);
    V.pop_back();
  }
  SizeClassAllocator.destroy(nullptr);
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}

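// Hammer the primary from 32 threads that allocate and deallocate
// concurrently, interleaving drains and forced releases to the OS.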
SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
  using Primary =
      TestAllocator<TypeParam, scudo::Config::Primary::SizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++) {
    Threads[I] = std::thread([&]() {
      static thread_local
          typename Primary::SizeClassAllocatorT SizeClassAllocator;
      SizeClassAllocator.init(nullptr, Allocator.get());
      std::vector<std::pair<scudo::uptr, void *>> V;
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) %
                                 Primary::SizeClassMap::MaxSize / 4;
        const scudo::uptr ClassId =
            Primary::SizeClassMap::getClassIdBySize(Size);
        void *P = SizeClassAllocator.allocate(ClassId);
        if (P)
          V.push_back(std::make_pair(ClassId, P));
      }

      // Try to interleave pushBlocks(), popBlocks() and releaseToOS().
      Allocator->releaseToOS(scudo::ReleaseToOS::Force);

      while (!V.empty()) {
        auto Pair = V.back();
        SizeClassAllocator.deallocate(Pair.first, Pair.second);
        V.pop_back();
        // This increases the chance of having non-full TransferBatches and
        // exercises the code path that merges TransferBatches.
        if (std::rand() % 8 == 0)
          SizeClassAllocator.drain();
      }
      SizeClassAllocator.destroy(nullptr);
    });
  }
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Allocator->getFragmentationInfo(&Str);
  Allocator->getMemoryGroupFragmentationInfo(&Str);
  Str.output();
}

// Through a simple allocation that spans two pages, verify that releaseToOS
// actually releases some bytes (at least one page's worth). This is a
// regression test for an error in how the release criteria were computed.
SCUDO_TYPED_TEST(ScudoPrimaryTest, ReleaseToOS) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::SizeClassAllocatorT SizeClassAllocator;
  SizeClassAllocator.init(nullptr, Allocator.get());
  const scudo::uptr Size = scudo::getPageSizeCached() * 2;
  EXPECT_TRUE(Primary::canAllocate(Size));
  const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
  void *P = SizeClassAllocator.allocate(ClassId);
  EXPECT_NE(P, nullptr);
  SizeClassAllocator.deallocate(ClassId, P);
  SizeClassAllocator.destroy(nullptr);
  EXPECT_GT(Allocator->releaseToOS(scudo::ReleaseToOS::ForceAll), 0U);
}

SCUDO_TYPED_TEST(ScudoPrimaryTest, MemoryGroup) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::SizeClassAllocatorT SizeClassAllocator;
  SizeClassAllocator.init(nullptr, Allocator.get());
  const scudo::uptr Size = 32U;
  const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);

  // Allocate 4 times the group size worth of memory, then release all of it.
  // The free blocks are expected to be classified into groups. Next, allocate
  // one group size worth of memory and expect the maximum address difference
  // between the returned blocks to be at most 2 times the group size. The
  // blocks don't need to fall within a single group because the group id is
  // derived by shifting the compact pointer and, depending on the
  // configuration, the compact pointer may not be aligned to the group size.
  // As a result, the blocks can span at most two groups.
  const scudo::uptr GroupSizeMem = (1ULL << Primary::GroupSizeLog);
  const scudo::uptr PeakAllocationMem = 4 * GroupSizeMem;
  const scudo::uptr PeakNumberOfAllocations = PeakAllocationMem / Size;
  const scudo::uptr FinalNumberOfAllocations = GroupSizeMem / Size;
  std::vector<scudo::uptr> Blocks;
  std::mt19937 R;

  for (scudo::uptr I = 0; I < PeakNumberOfAllocations; ++I)
    Blocks.push_back(
        reinterpret_cast<scudo::uptr>(SizeClassAllocator.allocate(ClassId)));

  std::shuffle(Blocks.begin(), Blocks.end(), R);

  // Release all the allocated blocks, including those held by the local
  // cache.
  while (!Blocks.empty()) {
    SizeClassAllocator.deallocate(ClassId,
                                  reinterpret_cast<void *>(Blocks.back()));
    Blocks.pop_back();
  }
  SizeClassAllocator.drain();

  for (scudo::uptr I = 0; I < FinalNumberOfAllocations; ++I)
    Blocks.push_back(
        reinterpret_cast<scudo::uptr>(SizeClassAllocator.allocate(ClassId)));

  EXPECT_LE(*std::max_element(Blocks.begin(), Blocks.end()) -
                *std::min_element(Blocks.begin(), Blocks.end()),
            GroupSizeMem * 2);

  while (!Blocks.empty()) {
    SizeClassAllocator.deallocate(ClassId,
                                  reinterpret_cast<void *>(Blocks.back()));
    Blocks.pop_back();
  }
  SizeClassAllocator.drain();
}