1//===-- combined_test.cpp ---------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "memtag.h"
10#include "stack_depot.h"
11#include "tests/scudo_unit_test.h"
12
13#include "allocator_config.h"
14#include "chunk.h"
15#include "combined.h"
16#include "condition_variable.h"
17#include "mem_map.h"
18#include "size_class_map.h"
19
20#include <algorithm>
21#include <condition_variable>
22#include <memory>
23#include <mutex>
24#include <set>
25#include <stdlib.h>
26#include <thread>
27#include <vector>
28
29static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
30static constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);
31
32// Fuchsia complains that the function is not used.
33UNUSED static void disableDebuggerdMaybe() {
34#if SCUDO_ANDROID
35 // Disable the debuggerd signal handler on Android, without this we can end
36 // up spending a significant amount of time creating tombstones.
37 signal(SIGSEGV, SIG_DFL);
38#endif
39}
40
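// Mirrors the combined allocator's size computation to predict whether an
// allocation with the given size and alignment is served by the primary
// (size-class based) allocator or falls back to the secondary.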
41template <class AllocatorT>
42bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
43 const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
44 if (Alignment < MinAlignment)
45 Alignment = MinAlignment;
46 const scudo::uptr NeededSize =
      scudo::roundUp(Size, MinAlignment) +
48 ((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
49 return AllocatorT::PrimaryT::canAllocate(NeededSize);
50}
51
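// Checks that an out-of-bounds write immediately before the chunk traps when
// memory tagging is active, and that a write just past the end traps either
// through tagging (primary allocations) or through the secondary's trailing
// guard page.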
52template <class AllocatorT>
53void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
54 scudo::uptr Alignment) {
55 const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Size = scudo::roundUp(Size, MinAlignment);
57 if (Allocator->useMemoryTaggingTestOnly()) {
58 EXPECT_DEATH(
59 {
60 disableDebuggerdMaybe();
61 reinterpret_cast<char *>(P)[-1] = 'A';
62 },
63 "");
64 }
65 if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
66 ? Allocator->useMemoryTaggingTestOnly()
67 : Alignment == MinAlignment &&
68 AllocatorT::SecondaryT::getGuardPageSize() > 0) {
69 EXPECT_DEATH(
70 {
71 disableDebuggerdMaybe();
72 reinterpret_cast<char *>(P)[Size] = 'A';
73 },
74 "");
75 }
76}
77
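// Wrapper around scudo::Allocator that initializes a TSD upfront, disables
// memory tagging when the system cannot actually deliver tag faults, and
// unmaps everything on destruction. operator new/delete are overridden below
// so that the (multi-megabyte) allocator object lives in dedicated storage.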
78template <typename Config> struct TestAllocator : scudo::Allocator<Config> {
79 TestAllocator() {
80 this->initThreadMaybe();
81 if (scudo::archSupportsMemoryTagging() &&
82 !scudo::systemDetectsMemoryTagFaultsTestOnly())
83 this->disableMemoryTagging();
84 }
85 ~TestAllocator() { this->unmapTestOnly(); }
86
87 void *operator new(size_t size);
88 void operator delete(void *ptr);
89};
90
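// Maximum alignment required by any of the allocator instantiations used in
// the tests below; the storage backing TestAllocator must honor it.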
91constexpr size_t kMaxAlign = std::max({
92 alignof(scudo::Allocator<scudo::DefaultConfig>),
93#if SCUDO_CAN_USE_PRIMARY64
94 alignof(scudo::Allocator<scudo::FuchsiaConfig>),
95#endif
96 alignof(scudo::Allocator<scudo::AndroidConfig>)
97});
98
99#if SCUDO_RISCV64
// The allocator is over 4 MB in size. Rather than creating an instance of it
// on the heap, keep it in global storage to reduce the fragmentation caused by
// mmap'ing it at the start of every test.
103struct TestAllocatorStorage {
104 static constexpr size_t kMaxSize = std::max({
105 sizeof(scudo::Allocator<scudo::DefaultConfig>),
106#if SCUDO_CAN_USE_PRIMARY64
107 sizeof(scudo::Allocator<scudo::FuchsiaConfig>),
108#endif
109 sizeof(scudo::Allocator<scudo::AndroidConfig>)
110 });
111
  // get() keeps the lock held until release() is called, a pattern the thread
  // safety analysis cannot model, so skip the analysis here.
113 static void *get(size_t size) NO_THREAD_SAFETY_ANALYSIS {
114 CHECK(size <= kMaxSize &&
115 "Allocation size doesn't fit in the allocator storage");
116 M.lock();
117 return AllocatorStorage;
118 }
119
120 static void release(void *ptr) NO_THREAD_SAFETY_ANALYSIS {
121 M.assertHeld();
122 M.unlock();
123 ASSERT_EQ(ptr, AllocatorStorage);
124 }
125
126 static scudo::HybridMutex M;
127 static uint8_t AllocatorStorage[kMaxSize];
128};
129scudo::HybridMutex TestAllocatorStorage::M;
130alignas(kMaxAlign) uint8_t TestAllocatorStorage::AllocatorStorage[kMaxSize];
131#else
132struct TestAllocatorStorage {
133 static void *get(size_t size) NO_THREAD_SAFETY_ANALYSIS {
134 void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, kMaxAlign, size));
136 return p;
137 }
  static void release(void *ptr) NO_THREAD_SAFETY_ANALYSIS { free(ptr); }
139};
140#endif
141
142template <typename Config>
143void *TestAllocator<Config>::operator new(size_t size) {
144 return TestAllocatorStorage::get(size);
145}
146
147template <typename Config>
148void TestAllocator<Config>::operator delete(void *ptr) {
149 TestAllocatorStorage::release(ptr);
150}
151
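// Typed test fixture: every test gets a fresh TestAllocator for the config
// under test. The Quarantine is only exercised with AndroidConfig, and any
// remaining memory is released back to the OS on teardown.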
152template <class TypeParam> struct ScudoCombinedTest : public Test {
153 ScudoCombinedTest() {
154 UseQuarantine = std::is_same<TypeParam, scudo::AndroidConfig>::value;
155 Allocator = std::make_unique<AllocatorT>();
156 }
157 ~ScudoCombinedTest() {
158 Allocator->releaseToOS(scudo::ReleaseToOS::Force);
159 UseQuarantine = true;
160 }
161
162 void RunTest();
163
164 void BasicTest(scudo::uptr SizeLog);
165
166 using AllocatorT = TestAllocator<TypeParam>;
167 std::unique_ptr<AllocatorT> Allocator;
168};
169
170template <typename T> using ScudoCombinedDeathTest = ScudoCombinedTest<T>;
171
172namespace scudo {
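// Configuration exercising the primary's condition variable code path, using
// ConditionVariableLinux on Linux and a dummy implementation elsewhere.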
173struct TestConditionVariableConfig {
174 static const bool MaySupportMemoryTagging = true;
175 template <class A>
176 using TSDRegistryT =
177 scudo::TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.
178
179 struct Primary {
180 using SizeClassMap = scudo::AndroidSizeClassMap;
181#if SCUDO_CAN_USE_PRIMARY64
182 static const scudo::uptr RegionSizeLog = 28U;
183 typedef scudo::u32 CompactPtrT;
184 static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
185 static const scudo::uptr GroupSizeLog = 20U;
186 static const bool EnableRandomOffset = true;
187 static const scudo::uptr MapSizeIncrement = 1UL << 18;
188#else
189 static const scudo::uptr RegionSizeLog = 18U;
190 static const scudo::uptr GroupSizeLog = 18U;
191 typedef scudo::uptr CompactPtrT;
192#endif
193 static const scudo::s32 MinReleaseToOsIntervalMs = 1000;
194 static const scudo::s32 MaxReleaseToOsIntervalMs = 1000;
195#if SCUDO_LINUX
196 using ConditionVariableT = scudo::ConditionVariableLinux;
197#else
198 using ConditionVariableT = scudo::ConditionVariableDummy;
199#endif
200 };
201#if SCUDO_CAN_USE_PRIMARY64
202 template <typename Config>
203 using PrimaryT = scudo::SizeClassAllocator64<Config>;
204#else
205 template <typename Config>
206 using PrimaryT = scudo::SizeClassAllocator32<Config>;
207#endif
208
209 struct Secondary {
210 template <typename Config>
211 using CacheT = scudo::MapAllocatorNoCache<Config>;
212 };
213 template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
214};
215
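// Configuration with block caching disabled in the primary (EnableBlockCache =
// false) and a no-op secondary cache, to cover the cache-less code paths.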
216struct TestNoCacheConfig {
217 static const bool MaySupportMemoryTagging = true;
218 template <class A>
219 using TSDRegistryT =
220 scudo::TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.
221
222 struct Primary {
223 using SizeClassMap = scudo::AndroidSizeClassMap;
224#if SCUDO_CAN_USE_PRIMARY64
225 static const scudo::uptr RegionSizeLog = 28U;
226 typedef scudo::u32 CompactPtrT;
227 static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
228 static const scudo::uptr GroupSizeLog = 20U;
229 static const bool EnableRandomOffset = true;
230 static const scudo::uptr MapSizeIncrement = 1UL << 18;
231#else
232 static const scudo::uptr RegionSizeLog = 18U;
233 static const scudo::uptr GroupSizeLog = 18U;
234 typedef scudo::uptr CompactPtrT;
235#endif
236 static const bool EnableBlockCache = false;
237 static const scudo::s32 MinReleaseToOsIntervalMs = 1000;
238 static const scudo::s32 MaxReleaseToOsIntervalMs = 1000;
239 };
240
241#if SCUDO_CAN_USE_PRIMARY64
242 template <typename Config>
243 using PrimaryT = scudo::SizeClassAllocator64<Config>;
244#else
245 template <typename Config>
246 using PrimaryT = scudo::SizeClassAllocator32<Config>;
247#endif
248
249 struct Secondary {
250 template <typename Config>
251 using CacheT = scudo::MapAllocatorNoCache<Config>;
252 };
253 template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
254};
255
256} // namespace scudo
257
258#if SCUDO_FUCHSIA
259#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME) \
260 SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, FuchsiaConfig)
261#else
262#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME) \
263 SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig) \
264 SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig) \
265 SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConditionVariableConfig) \
266 SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestNoCacheConfig)
267#endif
268
269#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE) \
270 using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<scudo::TYPE>; \
271 TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<scudo::TYPE>::Run(); }
272
273#define SCUDO_TYPED_TEST(FIXTURE, NAME) \
274 template <class TypeParam> \
275 struct FIXTURE##NAME : public FIXTURE<TypeParam> { \
276 using BaseT = FIXTURE<TypeParam>; \
277 void Run(); \
278 }; \
279 SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME) \
280 template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()
281
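// For illustration (assuming the DefaultConfig instantiation), a use such as
// SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) expands roughly to:
//
//   template <class TypeParam>
//   struct ScudoCombinedTestIsOwned : public ScudoCombinedTest<TypeParam> {
//     using BaseT = ScudoCombinedTest<TypeParam>;
//     void Run();
//   };
//   using ScudoCombinedTestIsOwned_DefaultConfig =
//       ScudoCombinedTestIsOwned<scudo::DefaultConfig>;
//   TEST_F(ScudoCombinedTestIsOwned_DefaultConfig, IsOwned) {
//     ScudoCombinedTestIsOwned<scudo::DefaultConfig>::Run();
//   }
//   // ... repeated for the other configs ...
//   template <class TypeParam> void ScudoCombinedTestIsOwned<TypeParam>::Run()
//
// with the test body following as the definition of Run().
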
282// Accessing `TSD->getCache()` requires `TSD::Mutex` which isn't easy to test
283// using thread-safety analysis. Alternatively, we verify the thread safety
284// through a runtime check in ScopedTSD and mark the test body with
285// NO_THREAD_SAFETY_ANALYSIS.
286#define SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(FIXTURE, NAME) \
287 template <class TypeParam> \
288 struct FIXTURE##NAME : public FIXTURE<TypeParam> { \
289 using BaseT = FIXTURE<TypeParam>; \
290 void Run() NO_THREAD_SAFETY_ANALYSIS; \
291 }; \
292 SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME) \
293 template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()
294
295SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) {
296 auto *Allocator = this->Allocator.get();
297 static scudo::u8 StaticBuffer[scudo::Chunk::getHeaderSize() + 1];
298 EXPECT_FALSE(
299 Allocator->isOwned(&StaticBuffer[scudo::Chunk::getHeaderSize()]));
300
301 scudo::u8 StackBuffer[scudo::Chunk::getHeaderSize() + 1];
302 for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
303 StackBuffer[I] = 0x42U;
304 EXPECT_FALSE(Allocator->isOwned(&StackBuffer[scudo::Chunk::getHeaderSize()]));
305 for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
306 EXPECT_EQ(StackBuffer[I], 0x42U);
307}
308
309template <class Config>
310void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLog) {
311 auto *Allocator = this->Allocator.get();
312
313 // This allocates and deallocates a bunch of chunks, with a wide range of
314 // sizes and alignments, with a focus on sizes that could trigger weird
315 // behaviors (plus or minus a small delta of a power of two for example).
316 for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
317 const scudo::uptr Align = 1U << AlignLog;
318 for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
319 if ((1LL << SizeLog) + Delta < 0)
320 continue;
321 const scudo::uptr Size =
322 static_cast<scudo::uptr>((1LL << SizeLog) + Delta);
323 void *P = Allocator->allocate(Size, Origin, Align);
324 EXPECT_NE(P, nullptr);
325 EXPECT_TRUE(Allocator->isOwned(P));
      EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
      EXPECT_LE(Size, Allocator->getUsableSize(P));
      memset(P, 0xaa, Size);
329 checkMemoryTaggingMaybe(Allocator, P, Size, Align);
330 Allocator->deallocate(P, Origin, Size);
331 }
332 }
333
334 Allocator->printStats();
335 Allocator->printFragmentationInfo();
336}
337
338#define SCUDO_MAKE_BASIC_TEST(SizeLog) \
339 SCUDO_TYPED_TEST(ScudoCombinedDeathTest, BasicCombined##SizeLog) { \
340 this->BasicTest(SizeLog); \
341 }
342
343SCUDO_MAKE_BASIC_TEST(0)
344SCUDO_MAKE_BASIC_TEST(1)
345SCUDO_MAKE_BASIC_TEST(2)
346SCUDO_MAKE_BASIC_TEST(3)
347SCUDO_MAKE_BASIC_TEST(4)
348SCUDO_MAKE_BASIC_TEST(5)
349SCUDO_MAKE_BASIC_TEST(6)
350SCUDO_MAKE_BASIC_TEST(7)
351SCUDO_MAKE_BASIC_TEST(8)
352SCUDO_MAKE_BASIC_TEST(9)
353SCUDO_MAKE_BASIC_TEST(10)
354SCUDO_MAKE_BASIC_TEST(11)
355SCUDO_MAKE_BASIC_TEST(12)
356SCUDO_MAKE_BASIC_TEST(13)
357SCUDO_MAKE_BASIC_TEST(14)
358SCUDO_MAKE_BASIC_TEST(15)
359SCUDO_MAKE_BASIC_TEST(16)
360SCUDO_MAKE_BASIC_TEST(17)
361SCUDO_MAKE_BASIC_TEST(18)
362SCUDO_MAKE_BASIC_TEST(19)
363SCUDO_MAKE_BASIC_TEST(20)
364
365SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroContents) {
366 auto *Allocator = this->Allocator.get();
367
368 // Ensure that specifying ZeroContents returns a zero'd out block.
369 for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
370 for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
371 const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
372 void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
373 EXPECT_NE(P, nullptr);
374 for (scudo::uptr I = 0; I < Size; I++)
375 ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
      memset(P, 0xaa, Size);
377 Allocator->deallocate(P, Origin, Size);
378 }
379 }
380}
381
382SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroFill) {
383 auto *Allocator = this->Allocator.get();
384
385 // Ensure that specifying ZeroFill returns a zero'd out block.
386 Allocator->setFillContents(scudo::ZeroFill);
387 for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
388 for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
389 const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
390 void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
391 EXPECT_NE(P, nullptr);
392 for (scudo::uptr I = 0; I < Size; I++)
393 ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
      memset(P, 0xaa, Size);
395 Allocator->deallocate(P, Origin, Size);
396 }
397 }
398}
399
400SCUDO_TYPED_TEST(ScudoCombinedTest, PatternOrZeroFill) {
401 auto *Allocator = this->Allocator.get();
402
403 // Ensure that specifying PatternOrZeroFill returns a pattern or zero filled
404 // block. The primary allocator only produces pattern filled blocks if MTE
405 // is disabled, so we only require pattern filled blocks in that case.
406 Allocator->setFillContents(scudo::PatternOrZeroFill);
407 for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
408 for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
409 const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
410 void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
411 EXPECT_NE(P, nullptr);
412 for (scudo::uptr I = 0; I < Size; I++) {
413 unsigned char V = (reinterpret_cast<unsigned char *>(P))[I];
414 if (isPrimaryAllocation<TestAllocator<TypeParam>>(Size,
415 1U << MinAlignLog) &&
416 !Allocator->useMemoryTaggingTestOnly())
417 ASSERT_EQ(V, scudo::PatternFillByte);
418 else
419 ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
420 }
      memset(P, 0xaa, Size);
422 Allocator->deallocate(P, Origin, Size);
423 }
424 }
425}
426
427SCUDO_TYPED_TEST(ScudoCombinedTest, BlockReuse) {
428 auto *Allocator = this->Allocator.get();
429
430 // Verify that a chunk will end up being reused, at some point.
431 const scudo::uptr NeedleSize = 1024U;
432 void *NeedleP = Allocator->allocate(NeedleSize, Origin);
433 Allocator->deallocate(NeedleP, Origin);
434 bool Found = false;
435 for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
436 void *P = Allocator->allocate(NeedleSize, Origin);
437 if (Allocator->getHeaderTaggedPointer(P) ==
438 Allocator->getHeaderTaggedPointer(NeedleP))
439 Found = true;
440 Allocator->deallocate(P, Origin);
441 }
442 EXPECT_TRUE(Found);
443}
444
445SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeIncreasing) {
446 auto *Allocator = this->Allocator.get();
447
  // Reallocate a chunk all the way up to a secondary allocation, verifying
  // that we preserve the data in the process.
450 scudo::uptr Size = 16;
451 void *P = Allocator->allocate(Size, Origin);
452 const char Marker = 'A';
  memset(P, Marker, Size);
454 while (Size < TypeParam::Primary::SizeClassMap::MaxSize * 4) {
455 void *NewP = Allocator->reallocate(P, Size * 2);
456 EXPECT_NE(NewP, nullptr);
457 for (scudo::uptr J = 0; J < Size; J++)
458 EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    memset(reinterpret_cast<char *>(NewP) + Size, Marker, Size);
460 Size *= 2U;
461 P = NewP;
462 }
463 Allocator->deallocate(P, Origin);
464}
465
466SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeDecreasing) {
467 auto *Allocator = this->Allocator.get();
468
469 // Reallocate a large chunk all the way down to a byte, verifying that we
470 // preserve the data in the process.
471 scudo::uptr Size = TypeParam::Primary::SizeClassMap::MaxSize * 2;
472 const scudo::uptr DataSize = 2048U;
473 void *P = Allocator->allocate(Size, Origin);
474 const char Marker = 'A';
  memset(P, Marker, scudo::Min(Size, DataSize));
476 while (Size > 1U) {
477 Size /= 2U;
478 void *NewP = Allocator->reallocate(P, Size);
479 EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
481 EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
482 P = NewP;
483 }
484 Allocator->deallocate(P, Origin);
485}
486
487SCUDO_TYPED_TEST(ScudoCombinedDeathTest, ReallocateSame) {
488 auto *Allocator = this->Allocator.get();
489
490 // Check that reallocating a chunk to a slightly smaller or larger size
491 // returns the same chunk. This requires that all the sizes we iterate on use
492 // the same block size, but that should be the case for MaxSize - 64 with our
493 // default class size maps.
494 constexpr scudo::uptr InitialSize =
495 TypeParam::Primary::SizeClassMap::MaxSize - 64;
496 const char Marker = 'A';
497 Allocator->setFillContents(scudo::PatternOrZeroFill);
498
499 void *P = Allocator->allocate(InitialSize, Origin);
500 scudo::uptr CurrentSize = InitialSize;
501 for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
    memset(P, Marker, CurrentSize);
503 const scudo::uptr NewSize =
504 static_cast<scudo::uptr>(static_cast<scudo::sptr>(InitialSize) + Delta);
505 void *NewP = Allocator->reallocate(P, NewSize);
506 EXPECT_EQ(NewP, P);
507
508 // Verify that existing contents have been preserved.
    for (scudo::uptr I = 0; I < scudo::Min(CurrentSize, NewSize); I++)
510 EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);
511
512 // Verify that new bytes are set according to FillContentsMode.
513 for (scudo::uptr I = CurrentSize; I < NewSize; I++) {
514 unsigned char V = (reinterpret_cast<unsigned char *>(NewP))[I];
515 EXPECT_TRUE(V == scudo::PatternFillByte || V == 0);
516 }
517
518 checkMemoryTaggingMaybe(Allocator, NewP, NewSize, 0);
519 CurrentSize = NewSize;
520 }
521 Allocator->deallocate(P, Origin);
522}
523
524SCUDO_TYPED_TEST(ScudoCombinedTest, IterateOverChunks) {
525 auto *Allocator = this->Allocator.get();
  // Allocate a bunch of chunks, then iterate over all of them, ensuring they
  // are the ones we allocated. This requires the allocator to not have any
  // other allocated chunk at this point (e.g., it won't work with the
  // Quarantine enabled).
  // FIXME: Make it work with UseQuarantine and tagging enabled. Internally,
  // iterateOverChunks reads headers through both tagged and untagged pointers,
  // so one of the two will fail.
532 if (!UseQuarantine) {
533 std::vector<void *> V;
534 for (scudo::uptr I = 0; I < 64U; I++)
535 V.push_back(Allocator->allocate(
536 static_cast<scudo::uptr>(std::rand()) %
537 (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
538 Origin));
539 Allocator->disable();
540 Allocator->iterateOverChunks(
541 0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
542 [](uintptr_t Base, UNUSED size_t Size, void *Arg) {
543 std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
544 void *P = reinterpret_cast<void *>(Base);
545 EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
546 },
547 reinterpret_cast<void *>(&V));
548 Allocator->enable();
549 for (auto P : V)
550 Allocator->deallocate(P, Origin);
551 }
552}
553
554SCUDO_TYPED_TEST(ScudoCombinedDeathTest, UseAfterFree) {
555 auto *Allocator = this->Allocator.get();
556
557 // Check that use-after-free is detected.
558 for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
559 const scudo::uptr Size = 1U << SizeLog;
560 if (!Allocator->useMemoryTaggingTestOnly())
561 continue;
562 EXPECT_DEATH(
563 {
564 disableDebuggerdMaybe();
565 void *P = Allocator->allocate(Size, Origin);
566 Allocator->deallocate(P, Origin);
567 reinterpret_cast<char *>(P)[0] = 'A';
568 },
569 "");
570 EXPECT_DEATH(
571 {
572 disableDebuggerdMaybe();
573 void *P = Allocator->allocate(Size, Origin);
574 Allocator->deallocate(P, Origin);
575 reinterpret_cast<char *>(P)[Size - 1] = 'A';
576 },
577 "");
578 }
579}
580
581SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DoubleFreeFromPrimary) {
582 auto *Allocator = this->Allocator.get();
583
584 for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
585 const scudo::uptr Size = 1U << SizeLog;
586 if (!isPrimaryAllocation<TestAllocator<TypeParam>>(Size, 0))
587 break;
588
589 // Verify that a double free results in a chunk state error.
590 EXPECT_DEATH(
591 {
592 // Allocate from primary
593 void *P = Allocator->allocate(Size, Origin);
594 ASSERT_TRUE(P != nullptr);
595 Allocator->deallocate(P, Origin);
596 Allocator->deallocate(P, Origin);
597 },
598 "invalid chunk state");
599 }
600}
601
602SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DisableMemoryTagging) {
603 auto *Allocator = this->Allocator.get();
604
605 if (Allocator->useMemoryTaggingTestOnly()) {
606 // Check that disabling memory tagging works correctly.
607 void *P = Allocator->allocate(2048, Origin);
608 EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 'A', "");
609 scudo::ScopedDisableMemoryTagChecks NoTagChecks;
610 Allocator->disableMemoryTagging();
611 reinterpret_cast<char *>(P)[2048] = 'A';
612 Allocator->deallocate(P, Origin);
613
614 P = Allocator->allocate(2048, Origin);
    EXPECT_EQ(scudo::untagPointer(P), P);
616 reinterpret_cast<char *>(P)[2048] = 'A';
617 Allocator->deallocate(P, Origin);
618
619 Allocator->releaseToOS(scudo::ReleaseToOS::Force);
620 }
621}
622
623SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
624 auto *Allocator = this->Allocator.get();
625
626 scudo::uptr BufferSize = 8192;
627 std::vector<char> Buffer(BufferSize);
628 scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
629 while (ActualSize > BufferSize) {
630 BufferSize = ActualSize + 1024;
631 Buffer.resize(BufferSize);
632 ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
633 }
634 std::string Stats(Buffer.begin(), Buffer.end());
  // Basic checks on the contents of the statistics output, which also allows
  // us to verify that we got it all.
637 EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
638 EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
639 EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
640}
641
642SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(ScudoCombinedTest, Drain) {
643 using AllocatorT = typename BaseT::AllocatorT;
644 auto *Allocator = this->Allocator.get();
645
646 std::vector<void *> V;
647 for (scudo::uptr I = 0; I < 64U; I++)
648 V.push_back(Allocator->allocate(
649 static_cast<scudo::uptr>(std::rand()) %
650 (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
651 Origin));
652 for (auto P : V)
653 Allocator->deallocate(P, Origin);
654
655 typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
656 *Allocator->getTSDRegistry());
657 EXPECT_TRUE(!TSD->getSizeClassAllocator().isEmpty());
658 TSD->getSizeClassAllocator().drain();
659 EXPECT_TRUE(TSD->getSizeClassAllocator().isEmpty());
660}
661
662SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(ScudoCombinedTest, ForceCacheDrain) {
663 using AllocatorT = typename BaseT::AllocatorT;
664 auto *Allocator = this->Allocator.get();
665
666 std::vector<void *> V;
667 for (scudo::uptr I = 0; I < 64U; I++)
668 V.push_back(Allocator->allocate(
669 static_cast<scudo::uptr>(std::rand()) %
670 (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
671 Origin));
672 for (auto P : V)
673 Allocator->deallocate(P, Origin);
674
675 // `ForceAll` will also drain the caches.
676 Allocator->releaseToOS(scudo::ReleaseToOS::ForceAll);
677
678 typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
679 *Allocator->getTSDRegistry());
680 EXPECT_TRUE(TSD->getSizeClassAllocator().isEmpty());
681 EXPECT_EQ(TSD->getQuarantineCache().getSize(), 0U);
682 EXPECT_TRUE(Allocator->getQuarantine()->isEmpty());
683}
684
685SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
686 std::mutex Mutex;
687 std::condition_variable Cv;
688 bool Ready = false;
689 auto *Allocator = this->Allocator.get();
690 std::thread Threads[32];
691 for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
692 Threads[I] = std::thread([&]() {
693 {
694 std::unique_lock<std::mutex> Lock(Mutex);
695 while (!Ready)
696 Cv.wait(Lock);
697 }
698 std::vector<std::pair<void *, scudo::uptr>> V;
699 for (scudo::uptr I = 0; I < 256U; I++) {
700 const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) % 4096U;
701 void *P = Allocator->allocate(Size, Origin);
        // A region could have run out of memory, resulting in a null P.
703 if (P)
704 V.push_back(std::make_pair(P, Size));
705 }
706
707 // Try to interleave pushBlocks(), popBatch() and releaseToOS().
708 Allocator->releaseToOS(scudo::ReleaseToOS::Force);
709
710 while (!V.empty()) {
711 auto Pair = V.back();
712 Allocator->deallocate(Pair.first, Origin, Pair.second);
713 V.pop_back();
714 }
715 });
716 {
717 std::unique_lock<std::mutex> Lock(Mutex);
718 Ready = true;
719 Cv.notify_all();
720 }
721 for (auto &T : Threads)
722 T.join();
723 Allocator->releaseToOS(scudo::ReleaseToOS::Force);
724}
725
726// Test that multiple instantiations of the allocator have not messed up the
727// process's signal handlers (GWP-ASan used to do this).
728TEST(ScudoCombinedDeathTest, SKIP_ON_FUCHSIA(testSEGV)) {
729 const scudo::uptr Size = 4 * scudo::getPageSizeCached();
730 scudo::ReservedMemoryT ReservedMemory;
  ASSERT_TRUE(ReservedMemory.create(/*Addr=*/0U, Size, "testSEGV"));
732 void *P = reinterpret_cast<void *>(ReservedMemory.getBase());
733 ASSERT_NE(P, nullptr);
734 EXPECT_DEATH(memset(P, 0xaa, Size), "");
735 ReservedMemory.release();
736}
737
738struct DeathSizeClassConfig {
739 static const scudo::uptr NumBits = 1;
740 static const scudo::uptr MinSizeLog = 10;
741 static const scudo::uptr MidSizeLog = 10;
742 static const scudo::uptr MaxSizeLog = 13;
743 static const scudo::u16 MaxNumCachedHint = 8;
744 static const scudo::uptr MaxBytesCachedLog = 12;
745 static const scudo::uptr SizeDelta = 0;
746};
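// With NumBits = 1, MinSizeLog = MidSizeLog = 10, MaxSizeLog = 13 and
// SizeDelta = 0, this map should yield four size classes of roughly 1024,
// 2048, 4096 and 8192 bytes, matching the "four sizes" note in DeathConfig
// below.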
747
748static const scudo::uptr DeathRegionSizeLog = 21U;
749struct DeathConfig {
750 static const bool MaySupportMemoryTagging = false;
751 template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
752
753 struct Primary {
754 // Tiny allocator, its Primary only serves chunks of four sizes.
755 using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
756 static const scudo::uptr RegionSizeLog = DeathRegionSizeLog;
757 static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
758 static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
759 typedef scudo::uptr CompactPtrT;
760 static const scudo::uptr CompactPtrScale = 0;
761 static const bool EnableRandomOffset = true;
762 static const scudo::uptr MapSizeIncrement = 1UL << 18;
763 static const scudo::uptr GroupSizeLog = 18;
764 };
765 template <typename Config>
766 using PrimaryT = scudo::SizeClassAllocator64<Config>;
767
768 struct Secondary {
769 template <typename Config>
770 using CacheT = scudo::MapAllocatorNoCache<Config>;
771 };
772
773 template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
774};
775
776TEST(ScudoCombinedDeathTest, DeathCombined) {
777 using AllocatorT = TestAllocator<DeathConfig>;
778 auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
779
780 const scudo::uptr Size = 1000U;
781 void *P = Allocator->allocate(Size, Origin);
782 EXPECT_NE(P, nullptr);
783
784 // Invalid sized deallocation.
785 EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");
786
787 // Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
788 UNUSED void *MisalignedP =
789 reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
790 EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
791 EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");
792
793 // Header corruption.
794 scudo::u64 *H =
      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
796 *H ^= 0x42U;
797 EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
798 *H ^= 0x420042U;
799 EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
800 *H ^= 0x420000U;
801
802 // Invalid chunk state.
803 Allocator->deallocate(P, Origin, Size);
804 EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
805 EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
806 EXPECT_DEATH(Allocator->getUsableSize(P), "");
807}
808
809// Verify that when a region gets full, the allocator will still manage to
810// fulfill the allocation through a larger size class.
811TEST(ScudoCombinedTest, FullRegion) {
812 using AllocatorT = TestAllocator<DeathConfig>;
813 auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
814
815 std::vector<void *> V;
816 scudo::uptr FailedAllocationsCount = 0;
817 for (scudo::uptr ClassId = 1U;
818 ClassId <= DeathConfig::Primary::SizeClassMap::LargestClassId;
819 ClassId++) {
820 const scudo::uptr Size =
821 DeathConfig::Primary::SizeClassMap::getSizeByClassId(ClassId);
822 // Allocate enough to fill all of the regions above this one.
823 const scudo::uptr MaxNumberOfChunks =
824 ((1U << DeathRegionSizeLog) / Size) *
825 (DeathConfig::Primary::SizeClassMap::LargestClassId - ClassId + 1);
826 void *P;
827 for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
828 P = Allocator->allocate(Size - 64U, Origin);
829 if (!P)
830 FailedAllocationsCount++;
831 else
832 V.push_back(P);
833 }
834 while (!V.empty()) {
835 Allocator->deallocate(V.back(), Origin);
836 V.pop_back();
837 }
838 }
839 EXPECT_EQ(FailedAllocationsCount, 0U);
840}
841
842// Ensure that releaseToOS can be called prior to any other allocator
843// operation without issue.
844SCUDO_TYPED_TEST(ScudoCombinedTest, ReleaseToOS) {
845 auto *Allocator = this->Allocator.get();
846 Allocator->releaseToOS(scudo::ReleaseToOS::Force);
847}
848
849SCUDO_TYPED_TEST(ScudoCombinedTest, OddEven) {
850 auto *Allocator = this->Allocator.get();
  Allocator->setOption(scudo::Option::MemtagTuning,
                       M_MEMTAG_TUNING_BUFFER_OVERFLOW);
852
853 if (!Allocator->useMemoryTaggingTestOnly())
854 return;
855
856 auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {
    scudo::uptr Tag1 = scudo::extractTag(scudo::loadTag(P1));
    scudo::uptr Tag2 = scudo::extractTag(scudo::loadTag(P2));
859 EXPECT_NE(Tag1 % 2, Tag2 % 2);
860 };
861
862 using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
863 for (scudo::uptr ClassId = 1U; ClassId <= SizeClassMap::LargestClassId;
864 ClassId++) {
865 const scudo::uptr Size = SizeClassMap::getSizeByClassId(ClassId);
866
867 std::set<scudo::uptr> Ptrs;
868 bool Found = false;
869 for (unsigned I = 0; I != 65536; ++I) {
      scudo::uptr P = scudo::untagPointer(reinterpret_cast<scudo::uptr>(
871 Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin)));
872 if (Ptrs.count(P - Size)) {
873 Found = true;
874 CheckOddEven(P, P - Size);
875 break;
876 }
877 if (Ptrs.count(P + Size)) {
878 Found = true;
879 CheckOddEven(P, P + Size);
880 break;
881 }
882 Ptrs.insert(P);
883 }
884 EXPECT_TRUE(Found);
885 }
886}
887
888SCUDO_TYPED_TEST(ScudoCombinedTest, DisableMemInit) {
889 auto *Allocator = this->Allocator.get();
890
891 std::vector<void *> Ptrs(65536);
892
893 Allocator->setOption(scudo::Option::ThreadDisableMemInit, 1);
894
895 constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);
896
897 // Test that if mem-init is disabled on a thread, calloc should still work as
898 // expected. This is tricky to ensure when MTE is enabled, so this test tries
899 // to exercise the relevant code on our MTE path.
900 for (scudo::uptr ClassId = 1U; ClassId <= 8; ClassId++) {
901 using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
902 const scudo::uptr Size =
903 SizeClassMap::getSizeByClassId(ClassId) - scudo::Chunk::getHeaderSize();
904 if (Size < 8)
905 continue;
906 for (unsigned I = 0; I != Ptrs.size(); ++I) {
907 Ptrs[I] = Allocator->allocate(Size, Origin);
908 memset(Ptrs[I], 0xaa, Size);
909 }
910 for (unsigned I = 0; I != Ptrs.size(); ++I)
911 Allocator->deallocate(Ptrs[I], Origin, Size);
912 for (unsigned I = 0; I != Ptrs.size(); ++I) {
913 Ptrs[I] = Allocator->allocate(Size - 8, Origin);
914 memset(Ptrs[I], 0xbb, Size - 8);
915 }
916 for (unsigned I = 0; I != Ptrs.size(); ++I)
917 Allocator->deallocate(Ptrs[I], Origin, Size - 8);
918 for (unsigned I = 0; I != Ptrs.size(); ++I) {
919 Ptrs[I] = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
920 for (scudo::uptr J = 0; J < Size; ++J)
921 ASSERT_EQ((reinterpret_cast<char *>(Ptrs[I]))[J], '\0');
922 }
923 }
924
925 Allocator->setOption(scudo::Option::ThreadDisableMemInit, 0);
926}
927
928SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateInPlaceStress) {
929 auto *Allocator = this->Allocator.get();
930
931 // Regression test: make realloc-in-place happen at the very right end of a
932 // mapped region.
933 constexpr size_t nPtrs = 10000;
934 for (scudo::uptr i = 1; i < 32; ++i) {
935 scudo::uptr Size = 16 * i - 1;
936 std::vector<void *> Ptrs;
937 for (size_t i = 0; i < nPtrs; ++i) {
938 void *P = Allocator->allocate(Size, Origin);
939 P = Allocator->reallocate(P, Size + 1);
940 Ptrs.push_back(P);
941 }
942
943 for (size_t i = 0; i < nPtrs; ++i)
944 Allocator->deallocate(Ptrs[i], Origin);
945 }
946}
947
948SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferDefaultDisabled) {
949 // The RingBuffer is not initialized until tracking is enabled for the
950 // first time.
951 auto *Allocator = this->Allocator.get();
952 EXPECT_EQ(0u, Allocator->getRingBufferSize());
953 EXPECT_EQ(nullptr, Allocator->getRingBufferAddress());
954}
955
956SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferInitOnce) {
957 auto *Allocator = this->Allocator.get();
958 Allocator->setTrackAllocationStacks(true);
959
960 auto RingBufferSize = Allocator->getRingBufferSize();
961 ASSERT_GT(RingBufferSize, 0u);
962 auto *RingBufferAddress = Allocator->getRingBufferAddress();
963 EXPECT_NE(nullptr, RingBufferAddress);
964
965 // Enable tracking again to verify that the initialization only happens once.
966 Allocator->setTrackAllocationStacks(true);
967 ASSERT_EQ(RingBufferSize, Allocator->getRingBufferSize());
968 EXPECT_EQ(RingBufferAddress, Allocator->getRingBufferAddress());
969}
970
971SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferSize) {
972 auto *Allocator = this->Allocator.get();
973 Allocator->setTrackAllocationStacks(true);
974
975 auto RingBufferSize = Allocator->getRingBufferSize();
976 ASSERT_GT(RingBufferSize, 0u);
977 EXPECT_EQ(Allocator->getRingBufferAddress()[RingBufferSize - 1], '\0');
978}
979
980SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
981 auto *Allocator = this->Allocator.get();
982 Allocator->setTrackAllocationStacks(true);
983
984 auto *RingBufferAddress = Allocator->getRingBufferAddress();
985 EXPECT_NE(RingBufferAddress, nullptr);
986 EXPECT_EQ(RingBufferAddress, Allocator->getRingBufferAddress());
987}
988
989SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotDefaultDisabled) {
990 // The StackDepot is not initialized until tracking is enabled for the
991 // first time.
992 auto *Allocator = this->Allocator.get();
993 EXPECT_EQ(0u, Allocator->getStackDepotSize());
994 EXPECT_EQ(nullptr, Allocator->getStackDepotAddress());
995}
996
997SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotInitOnce) {
998 auto *Allocator = this->Allocator.get();
999 Allocator->setTrackAllocationStacks(true);
1000
1001 auto StackDepotSize = Allocator->getStackDepotSize();
1002 EXPECT_GT(StackDepotSize, 0u);
1003 auto *StackDepotAddress = Allocator->getStackDepotAddress();
1004 EXPECT_NE(nullptr, StackDepotAddress);
1005
1006 // Enable tracking again to verify that the initialization only happens once.
1007 Allocator->setTrackAllocationStacks(true);
1008 EXPECT_EQ(StackDepotSize, Allocator->getStackDepotSize());
1009 EXPECT_EQ(StackDepotAddress, Allocator->getStackDepotAddress());
1010}
1011
1012SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotSize) {
1013 auto *Allocator = this->Allocator.get();
1014 Allocator->setTrackAllocationStacks(true);
1015
1016 auto StackDepotSize = Allocator->getStackDepotSize();
1017 EXPECT_GT(StackDepotSize, 0u);
1018 EXPECT_EQ(Allocator->getStackDepotAddress()[StackDepotSize - 1], '\0');
1019}
1020
1021SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotAddress) {
1022 auto *Allocator = this->Allocator.get();
1023 Allocator->setTrackAllocationStacks(true);
1024
1025 auto *StackDepotAddress = Allocator->getStackDepotAddress();
1026 EXPECT_NE(StackDepotAddress, nullptr);
1027 EXPECT_EQ(StackDepotAddress, Allocator->getStackDepotAddress());
1028}
1029
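// Exercises the StackDepot directly on a caller-provided buffer, which must be
// large enough for the depot itself plus the ring (atomic_u64) and hash table
// (atomic_u32) entries passed to init(); this is what the isValid() checks
// verify.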
1030SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepot) {
1031 alignas(scudo::StackDepot) char Buf[sizeof(scudo::StackDepot) +
1032 1024 * sizeof(scudo::atomic_u64) +
1033 1024 * sizeof(scudo::atomic_u32)] = {};
1034 auto *Depot = reinterpret_cast<scudo::StackDepot *>(Buf);
  Depot->init(1024, 1024);
  ASSERT_TRUE(Depot->isValid(sizeof(Buf)));
  ASSERT_FALSE(Depot->isValid(sizeof(Buf) - 1));
  scudo::uptr Stack[] = {1, 2, 3};
  scudo::u32 Elem = Depot->insert(&Stack[0], &Stack[3]);
  scudo::uptr RingPosPtr = 0;
  scudo::uptr SizePtr = 0;
  ASSERT_TRUE(Depot->find(Elem, &RingPosPtr, &SizePtr));
  ASSERT_EQ(SizePtr, 3u);
  EXPECT_EQ(Depot->at(RingPosPtr), 1u);
  EXPECT_EQ(Depot->at(RingPosPtr + 1), 2u);
  EXPECT_EQ(Depot->at(RingPosPtr + 2), 3u);
1047}
1048
1049#if SCUDO_CAN_USE_PRIMARY64
1050#if SCUDO_TRUSTY
1051
// TrustyConfig is designed for a domain-specific allocator. Add a basic test
// that covers only simple operations and ensures the configuration is able to
// compile.
1055TEST(ScudoCombinedTest, BasicTrustyConfig) {
1056 using AllocatorT = scudo::Allocator<scudo::TrustyConfig>;
1057 auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
1058
1059 for (scudo::uptr ClassId = 1U;
1060 ClassId <= scudo::TrustyConfig::SizeClassMap::LargestClassId;
1061 ClassId++) {
1062 const scudo::uptr Size =
1063 scudo::TrustyConfig::SizeClassMap::getSizeByClassId(ClassId);
1064 void *p = Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin);
1065 ASSERT_NE(p, nullptr);
1066 free(p);
1067 }
1068
1070 typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
1071 *Allocator->getTSDRegistry());
1072 TSD->getSizeClassAllocator().drain();
1073
1074 Allocator->releaseToOS(scudo::ReleaseToOS::Force);
1075}
1076
1077#endif
1078#endif
1079
