//===-- combined_test.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memtag.h"
#include "stack_depot.h"
#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "chunk.h"
#include "combined.h"
#include "condition_variable.h"
#include "mem_map.h"
#include "size_class_map.h"

#include <algorithm>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <set>
#include <stdlib.h>
#include <thread>
#include <vector>

static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
static constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

// Fuchsia complains that the function is not used.
UNUSED static void disableDebuggerdMaybe() {
#if SCUDO_ANDROID
  // Disable the debuggerd signal handler on Android, without this we can end
  // up spending a significant amount of time creating tombstones.
  signal(SIGSEGV, SIG_DFL);
#endif
}

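// Predicts whether an allocation of the given size and alignment will be
// served by the primary allocator (i.e. fits in one of its size classes) or
// fall back to the secondary.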
template <class AllocatorT>
bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  if (Alignment < MinAlignment)
    Alignment = MinAlignment;
  const scudo::uptr NeededSize =
      scudo::roundUp(Size, MinAlignment) +
      ((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
  return AllocatorT::PrimaryT::canAllocate(NeededSize);
}

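// When memory tagging is in use, an access just before or just past the chunk
// should fault on a tag mismatch; EXPECT_DEATH is used to observe that.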
template <class AllocatorT>
void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
                             scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Size = scudo::roundUp(Size, MinAlignment);
  if (Allocator->useMemoryTaggingTestOnly())
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[-1] = 'A';
        },
        "");
  if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
          ? Allocator->useMemoryTaggingTestOnly()
          : Alignment == MinAlignment) {
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[Size] = 'A';
        },
        "");
  }
}

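// Test wrapper around scudo::Allocator: it forces thread-local initialization
// up front, disables tagging when the system cannot fault on tag mismatches,
// and unmaps everything on destruction. Instances are placed in the storage
// provided by TestAllocatorStorage below via operator new/delete.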
template <typename Config> struct TestAllocator : scudo::Allocator<Config> {
  TestAllocator() {
    this->initThreadMaybe();
    if (scudo::archSupportsMemoryTagging() &&
        !scudo::systemDetectsMemoryTagFaultsTestOnly())
      this->disableMemoryTagging();
  }
  ~TestAllocator() { this->unmapTestOnly(); }

  void *operator new(size_t size);
  void operator delete(void *ptr);
};

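// The strictest alignment required by any of the allocator configurations
// instantiated in these tests; the backing storage below is aligned to it.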
constexpr size_t kMaxAlign = std::max({
    alignof(scudo::Allocator<scudo::DefaultConfig>),
#if SCUDO_CAN_USE_PRIMARY64
    alignof(scudo::Allocator<scudo::FuchsiaConfig>),
#endif
    alignof(scudo::Allocator<scudo::AndroidConfig>)
});

#if SCUDO_RISCV64
// The allocator is over 4MB in size. Rather than creating an instance of it on
// the heap for every test, keep it in global storage to reduce the
// fragmentation caused by mmap'ing it at the start of each test.
struct TestAllocatorStorage {
  static constexpr size_t kMaxSize = std::max({
      sizeof(scudo::Allocator<scudo::DefaultConfig>),
#if SCUDO_CAN_USE_PRIMARY64
      sizeof(scudo::Allocator<scudo::FuchsiaConfig>),
#endif
      sizeof(scudo::Allocator<scudo::AndroidConfig>)
  });

  // The mutex is acquired in get() and released in release(), which clang's
  // thread safety analysis cannot track, so skip the analysis here.
  static void *get(size_t size) NO_THREAD_SAFETY_ANALYSIS {
    CHECK(size <= kMaxSize &&
          "Allocation size doesn't fit in the allocator storage");
    M.lock();
    return AllocatorStorage;
  }

  static void release(void *ptr) NO_THREAD_SAFETY_ANALYSIS {
    M.assertHeld();
    M.unlock();
    ASSERT_EQ(ptr, AllocatorStorage);
  }

  static scudo::HybridMutex M;
  static uint8_t AllocatorStorage[kMaxSize];
};
scudo::HybridMutex TestAllocatorStorage::M;
alignas(kMaxAlign) uint8_t TestAllocatorStorage::AllocatorStorage[kMaxSize];
#else
struct TestAllocatorStorage {
  static void *get(size_t size) NO_THREAD_SAFETY_ANALYSIS {
    void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, kMaxAlign, size));
    return p;
  }
  static void release(void *ptr) NO_THREAD_SAFETY_ANALYSIS { free(ptr); }
};
#endif

template <typename Config>
void *TestAllocator<Config>::operator new(size_t size) {
  return TestAllocatorStorage::get(size);
}

template <typename Config>
void TestAllocator<Config>::operator delete(void *ptr) {
  TestAllocatorStorage::release(ptr);
}

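// Test fixture instantiated once per allocator config by the SCUDO_TYPED_TEST
// machinery below. The quarantine is only enabled for AndroidConfig.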
template <class TypeParam> struct ScudoCombinedTest : public Test {
  ScudoCombinedTest() {
    UseQuarantine = std::is_same<TypeParam, scudo::AndroidConfig>::value;
    Allocator = std::make_unique<AllocatorT>();
  }
  ~ScudoCombinedTest() {
    Allocator->releaseToOS(scudo::ReleaseToOS::Force);
    UseQuarantine = true;
  }

  void RunTest();

  void BasicTest(scudo::uptr SizeLog);

  using AllocatorT = TestAllocator<TypeParam>;
  std::unique_ptr<AllocatorT> Allocator;
};

template <typename T> using ScudoCombinedDeathTest = ScudoCombinedTest<T>;

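// A config exercising the shared TSD registry together with the Primary's
// condition variable path (ConditionVariableLinux on Linux, a dummy
// implementation elsewhere). It lives in namespace scudo so that the typed
// test macros can refer to it as scudo::TestConditionVariableConfig.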
namespace scudo {
struct TestConditionVariableConfig {
  static const bool MaySupportMemoryTagging = true;
  template <class A>
  using TSDRegistryT =
      scudo::TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.

  struct Primary {
    using SizeClassMap = scudo::AndroidSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
    static const scudo::uptr RegionSizeLog = 28U;
    typedef scudo::u32 CompactPtrT;
    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
    static const scudo::uptr GroupSizeLog = 20U;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
#else
    static const scudo::uptr RegionSizeLog = 18U;
    static const scudo::uptr GroupSizeLog = 18U;
    typedef scudo::uptr CompactPtrT;
#endif
    static const scudo::s32 MinReleaseToOsIntervalMs = 1000;
    static const scudo::s32 MaxReleaseToOsIntervalMs = 1000;
#if SCUDO_LINUX
    using ConditionVariableT = scudo::ConditionVariableLinux;
#else
    using ConditionVariableT = scudo::ConditionVariableDummy;
#endif
  };
#if SCUDO_CAN_USE_PRIMARY64
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator64<Config>;
#else
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator32<Config>;
#endif

  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
  };
  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
};
} // namespace scudo

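// Poor man's typed tests: the macros below stamp out one TEST_F per allocator
// config for each test body. On Fuchsia, only FuchsiaConfig is exercised.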
#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, FuchsiaConfig)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig)                          \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)                          \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConditionVariableConfig)
#endif

#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<scudo::TYPE>;                   \
  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<scudo::TYPE>::Run(); }

#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
  template <class TypeParam>                                                   \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    using BaseT = FIXTURE<TypeParam>;                                          \
    void Run();                                                                \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()

// Accessing `TSD->getCache()` requires `TSD::Mutex`, which isn't easy to
// satisfy with static thread-safety analysis. Instead, thread safety is
// verified by a runtime check in ScopedTSD, and the test body is marked with
// NO_THREAD_SAFETY_ANALYSIS.
#define SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(FIXTURE, NAME)                     \
  template <class TypeParam>                                                   \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    using BaseT = FIXTURE<TypeParam>;                                          \
    void Run() NO_THREAD_SAFETY_ANALYSIS;                                      \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()

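// isOwned() must reject pointers the allocator never handed out, and must not
// modify the memory it inspects while doing so.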
SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) {
  auto *Allocator = this->Allocator.get();
  static scudo::u8 StaticBuffer[scudo::Chunk::getHeaderSize() + 1];
  EXPECT_FALSE(
      Allocator->isOwned(&StaticBuffer[scudo::Chunk::getHeaderSize()]));

  scudo::u8 StackBuffer[scudo::Chunk::getHeaderSize() + 1];
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    StackBuffer[I] = 0x42U;
  EXPECT_FALSE(Allocator->isOwned(&StackBuffer[scudo::Chunk::getHeaderSize()]));
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    EXPECT_EQ(StackBuffer[I], 0x42U);
}

template <class Config>
void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLog) {
  auto *Allocator = this->Allocator.get();

  // This allocates and deallocates a bunch of chunks, with a wide range of
  // sizes and alignments, with a focus on sizes that could trigger weird
  // behaviors (plus or minus a small delta of a power of two for example).
  for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
    const scudo::uptr Align = 1U << AlignLog;
    for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
      if ((1LL << SizeLog) + Delta < 0)
        continue;
      const scudo::uptr Size =
          static_cast<scudo::uptr>((1LL << SizeLog) + Delta);
      void *P = Allocator->allocate(Size, Origin, Align);
      EXPECT_NE(P, nullptr);
      EXPECT_TRUE(Allocator->isOwned(P));
      EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
      EXPECT_LE(Size, Allocator->getUsableSize(P));
      memset(P, 0xaa, Size);
      checkMemoryTaggingMaybe(Allocator, P, Size, Align);
      Allocator->deallocate(P, Origin, Size);
    }
  }

  Allocator->printStats();
  Allocator->printFragmentationInfo();
}

#define SCUDO_MAKE_BASIC_TEST(SizeLog)                                         \
  SCUDO_TYPED_TEST(ScudoCombinedDeathTest, BasicCombined##SizeLog) {           \
    this->BasicTest(SizeLog);                                                  \
  }

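// Instantiate the basic test for every size log from 2^0 up to 2^20, covering
// both primary and secondary backed allocations.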
SCUDO_MAKE_BASIC_TEST(0)
SCUDO_MAKE_BASIC_TEST(1)
SCUDO_MAKE_BASIC_TEST(2)
SCUDO_MAKE_BASIC_TEST(3)
SCUDO_MAKE_BASIC_TEST(4)
SCUDO_MAKE_BASIC_TEST(5)
SCUDO_MAKE_BASIC_TEST(6)
SCUDO_MAKE_BASIC_TEST(7)
SCUDO_MAKE_BASIC_TEST(8)
SCUDO_MAKE_BASIC_TEST(9)
SCUDO_MAKE_BASIC_TEST(10)
SCUDO_MAKE_BASIC_TEST(11)
SCUDO_MAKE_BASIC_TEST(12)
SCUDO_MAKE_BASIC_TEST(13)
SCUDO_MAKE_BASIC_TEST(14)
SCUDO_MAKE_BASIC_TEST(15)
SCUDO_MAKE_BASIC_TEST(16)
SCUDO_MAKE_BASIC_TEST(17)
SCUDO_MAKE_BASIC_TEST(18)
SCUDO_MAKE_BASIC_TEST(19)
SCUDO_MAKE_BASIC_TEST(20)

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroContents) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroContents returns a zeroed-out block.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroFill returns a zeroed-out block.
  Allocator->setFillContents(scudo::ZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, PatternOrZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying PatternOrZeroFill returns a pattern or zero filled
  // block. The primary allocator only produces pattern filled blocks if MTE
  // is disabled, so we only require pattern filled blocks in that case.
  Allocator->setFillContents(scudo::PatternOrZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++) {
        unsigned char V = (reinterpret_cast<unsigned char *>(P))[I];
        if (isPrimaryAllocation<TestAllocator<TypeParam>>(Size,
                                                          1U << MinAlignLog) &&
            !Allocator->useMemoryTaggingTestOnly())
          ASSERT_EQ(V, scudo::PatternFillByte);
        else
          ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
      }
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, BlockReuse) {
  auto *Allocator = this->Allocator.get();

  // Verify that a chunk will end up being reused, at some point.
  const scudo::uptr NeedleSize = 1024U;
  void *NeedleP = Allocator->allocate(NeedleSize, Origin);
  Allocator->deallocate(NeedleP, Origin);
  bool Found = false;
  for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
    void *P = Allocator->allocate(NeedleSize, Origin);
    if (Allocator->getHeaderTaggedPointer(P) ==
        Allocator->getHeaderTaggedPointer(NeedleP))
      Found = true;
    Allocator->deallocate(P, Origin);
  }
  EXPECT_TRUE(Found);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeIncreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a chunk all the way up to a secondary allocation, verifying
  // that we preserve the data in the process.
  scudo::uptr Size = 16;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 'A';
  memset(P, Marker, Size);
  while (Size < TypeParam::Primary::SizeClassMap::MaxSize * 4) {
    void *NewP = Allocator->reallocate(P, Size * 2);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < Size; J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    memset(reinterpret_cast<char *>(NewP) + Size, Marker, Size);
    Size *= 2U;
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeDecreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a large chunk all the way down to a byte, verifying that we
  // preserve the data in the process.
  scudo::uptr Size = TypeParam::Primary::SizeClassMap::MaxSize * 2;
  const scudo::uptr DataSize = 2048U;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 'A';
  memset(P, Marker, scudo::Min(Size, DataSize));
  while (Size > 1U) {
    Size /= 2U;
    void *NewP = Allocator->reallocate(P, Size);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, ReallocateSame) {
  auto *Allocator = this->Allocator.get();

  // Check that reallocating a chunk to a slightly smaller or larger size
  // returns the same chunk. This requires that all the sizes we iterate on use
  // the same block size, but that should be the case for MaxSize - 64 with our
  // default class size maps.
  constexpr scudo::uptr ReallocSize =
      TypeParam::Primary::SizeClassMap::MaxSize - 64;
  void *P = Allocator->allocate(ReallocSize, Origin);
  const char Marker = 'A';
  memset(P, Marker, ReallocSize);
  for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
    const scudo::uptr NewSize =
        static_cast<scudo::uptr>(static_cast<scudo::sptr>(ReallocSize) + Delta);
    void *NewP = Allocator->reallocate(P, NewSize);
    EXPECT_EQ(NewP, P);
    for (scudo::uptr I = 0; I < ReallocSize - 32; I++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);
    checkMemoryTaggingMaybe(Allocator, NewP, NewSize, 0);
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, IterateOverChunks) {
  auto *Allocator = this->Allocator.get();
  // Allocate a bunch of chunks, then iterate over all the chunks, ensuring
  // they are the ones we allocated. This requires the allocator to not have
  // any other allocated chunk at this point (eg: it won't work with the
  // Quarantine).
  // FIXME: Make it work with UseQuarantine and tagging enabled. The internals
  // of iterateOverChunks read headers through both tagged and non-tagged
  // pointers, so one of them will fail.
  if (!UseQuarantine) {
    std::vector<void *> V;
    for (scudo::uptr I = 0; I < 64U; I++)
      V.push_back(Allocator->allocate(
          static_cast<scudo::uptr>(std::rand()) %
              (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
          Origin));
    Allocator->disable();
    Allocator->iterateOverChunks(
        0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
        [](uintptr_t Base, UNUSED size_t Size, void *Arg) {
          std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
          void *P = reinterpret_cast<void *>(Base);
          EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
        },
        reinterpret_cast<void *>(&V));
    Allocator->enable();
    for (auto P : V)
      Allocator->deallocate(P, Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, UseAfterFree) {
  auto *Allocator = this->Allocator.get();

  // Check that use-after-free is detected.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    const scudo::uptr Size = 1U << SizeLog;
    if (!Allocator->useMemoryTaggingTestOnly())
      continue;
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[0] = 'A';
        },
        "");
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[Size - 1] = 'A';
        },
        "");
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DisableMemoryTagging) {
  auto *Allocator = this->Allocator.get();

  if (Allocator->useMemoryTaggingTestOnly()) {
    // Check that disabling memory tagging works correctly.
    void *P = Allocator->allocate(2048, Origin);
    EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 'A', "");
    scudo::ScopedDisableMemoryTagChecks NoTagChecks;
    Allocator->disableMemoryTagging();
    reinterpret_cast<char *>(P)[2048] = 'A';
    Allocator->deallocate(P, Origin);

    P = Allocator->allocate(2048, Origin);
    EXPECT_EQ(scudo::untagPointer(P), P);
    reinterpret_cast<char *>(P)[2048] = 'A';
    Allocator->deallocate(P, Origin);

    Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  }
}

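// getStats() needs a buffer large enough for the whole report; grow it until
// the returned ActualSize fits, then check for the expected section headers.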
SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
  auto *Allocator = this->Allocator.get();

  scudo::uptr BufferSize = 8192;
  std::vector<char> Buffer(BufferSize);
  scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  while (ActualSize > BufferSize) {
    BufferSize = ActualSize + 1024;
    Buffer.resize(BufferSize);
    ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  }
  std::string Stats(Buffer.begin(), Buffer.end());
  // Basic checks on the contents of the statistics output, which also allows
  // us to verify that we got it all.
  EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
}

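// Populate the thread's local cache with a batch of allocations, then verify
// that drain() leaves it empty.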
SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(ScudoCombinedTest, CacheDrain) {
  using AllocatorT = typename BaseT::AllocatorT;
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        static_cast<scudo::uptr>(std::rand()) %
            (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
        Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
      *Allocator->getTSDRegistry());
  EXPECT_TRUE(!TSD->getCache().isEmpty());
  TSD->getCache().drain();
  EXPECT_TRUE(TSD->getCache().isEmpty());
}

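// releaseToOS(ForceAll) must drain the local caches as well as the quarantine.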
SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(ScudoCombinedTest, ForceCacheDrain) {
  using AllocatorT = typename BaseT::AllocatorT;
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        static_cast<scudo::uptr>(std::rand()) %
            (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
        Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

  // `ForceAll` will also drain the caches.
  Allocator->releaseToOS(scudo::ReleaseToOS::ForceAll);

  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
      *Allocator->getTSDRegistry());
  EXPECT_TRUE(TSD->getCache().isEmpty());
  EXPECT_EQ(TSD->getQuarantineCache().getSize(), 0U);
  EXPECT_TRUE(Allocator->getQuarantine()->isEmpty());
}

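// Stress the allocator from 32 threads at once, mixing allocations,
// deallocations and releaseToOS() calls.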
SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  auto *Allocator = this->Allocator.get();
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread([&]() {
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      std::vector<std::pair<void *, scudo::uptr>> V;
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) % 4096U;
        void *P = Allocator->allocate(Size, Origin);
        // A region could have run out of memory, resulting in a null P.
        if (P)
          V.push_back(std::make_pair(P, Size));
      }

      // Try to interleave pushBlocks(), popBatch() and releaseToOS().
      Allocator->releaseToOS(scudo::ReleaseToOS::Force);

      while (!V.empty()) {
        auto Pair = V.back();
        Allocator->deallocate(Pair.first, Origin, Pair.second);
        V.pop_back();
      }
    });
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

// Test that multiple instantiations of the allocator have not messed up the
// process's signal handlers (GWP-ASan used to do this).
TEST(ScudoCombinedDeathTest, SKIP_ON_FUCHSIA(testSEGV)) {
  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
  scudo::ReservedMemoryT ReservedMemory;
  ASSERT_TRUE(ReservedMemory.create(/*Addr=*/0U, Size, "testSEGV"));
  void *P = reinterpret_cast<void *>(ReservedMemory.getBase());
  ASSERT_NE(P, nullptr);
  EXPECT_DEATH(memset(P, 0xaa, Size), "");
  ReservedMemory.release();
}

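// A minimal size class map for the death tests: four classes between 1024 and
// 8192 bytes (MinSizeLog 10 to MaxSizeLog 13).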
struct DeathSizeClassConfig {
  static const scudo::uptr NumBits = 1;
  static const scudo::uptr MinSizeLog = 10;
  static const scudo::uptr MidSizeLog = 10;
  static const scudo::uptr MaxSizeLog = 13;
  static const scudo::u16 MaxNumCachedHint = 8;
  static const scudo::uptr MaxBytesCachedLog = 12;
  static const scudo::uptr SizeDelta = 0;
};

static const scudo::uptr DeathRegionSizeLog = 21U;
struct DeathConfig {
  static const bool MaySupportMemoryTagging = false;
  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;

  struct Primary {
    // Tiny allocator, its Primary only serves chunks of four sizes.
    using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
    static const scudo::uptr RegionSizeLog = DeathRegionSizeLog;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
    static const scudo::uptr GroupSizeLog = 18;
  };
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator64<Config>;

  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
  };

  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
};

TEST(ScudoCombinedDeathTest, DeathCombined) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  const scudo::uptr Size = 1000U;
  void *P = Allocator->allocate(Size, Origin);
  EXPECT_NE(P, nullptr);

  // Invalid sized deallocation.
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");

  // Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
  UNUSED void *MisalignedP =
      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
  EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");

  // Header corruption.
  scudo::u64 *H =
      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
  *H ^= 0x42U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420042U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420000U;

  // Invalid chunk state.
  Allocator->deallocate(P, Origin, Size);
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
  EXPECT_DEATH(Allocator->getUsableSize(P), "");
}

// Verify that when a region gets full, the allocator will still manage to
// fulfill the allocation through a larger size class.
TEST(ScudoCombinedTest, FullRegion) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  std::vector<void *> V;
  scudo::uptr FailedAllocationsCount = 0;
  for (scudo::uptr ClassId = 1U;
       ClassId <= DeathConfig::Primary::SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size =
        DeathConfig::Primary::SizeClassMap::getSizeByClassId(ClassId);
    // Allocate enough to fill all of the regions above this one.
    const scudo::uptr MaxNumberOfChunks =
        ((1U << DeathRegionSizeLog) / Size) *
        (DeathConfig::Primary::SizeClassMap::LargestClassId - ClassId + 1);
    void *P;
    for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
      P = Allocator->allocate(Size - 64U, Origin);
      if (!P)
        FailedAllocationsCount++;
      else
        V.push_back(P);
    }
    while (!V.empty()) {
      Allocator->deallocate(V.back(), Origin);
      V.pop_back();
    }
  }
  EXPECT_EQ(FailedAllocationsCount, 0U);
}

// Ensure that releaseToOS can be called prior to any other allocator
// operation without issue.
SCUDO_TYPED_TEST(ScudoCombinedTest, ReleaseToOS) {
  auto *Allocator = this->Allocator.get();
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

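// With MemtagTuning set for buffer overflow detection, chunks adjacent in a
// size class region are expected to carry tags of opposite parity.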
SCUDO_TYPED_TEST(ScudoCombinedTest, OddEven) {
  auto *Allocator = this->Allocator.get();
  Allocator->setOption(scudo::Option::MemtagTuning,
                       M_MEMTAG_TUNING_BUFFER_OVERFLOW);

  if (!Allocator->useMemoryTaggingTestOnly())
    return;

  auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {
    scudo::uptr Tag1 = scudo::extractTag(scudo::loadTag(P1));
    scudo::uptr Tag2 = scudo::extractTag(scudo::loadTag(P2));
    EXPECT_NE(Tag1 % 2, Tag2 % 2);
  };

  using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
  for (scudo::uptr ClassId = 1U; ClassId <= SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size = SizeClassMap::getSizeByClassId(ClassId);

    std::set<scudo::uptr> Ptrs;
    bool Found = false;
    for (unsigned I = 0; I != 65536; ++I) {
      scudo::uptr P = scudo::untagPointer(reinterpret_cast<scudo::uptr>(
          Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin)));
      if (Ptrs.count(P - Size)) {
        Found = true;
        CheckOddEven(P, P - Size);
        break;
      }
      if (Ptrs.count(P + Size)) {
        Found = true;
        CheckOddEven(P, P + Size);
        break;
      }
      Ptrs.insert(P);
    }
    EXPECT_TRUE(Found);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, DisableMemInit) {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> Ptrs(65536);

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 1);

  constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

  // Test that if mem-init is disabled on a thread, calloc should still work as
  // expected. This is tricky to ensure when MTE is enabled, so this test tries
  // to exercise the relevant code on our MTE path.
  for (scudo::uptr ClassId = 1U; ClassId <= 8; ClassId++) {
    using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
    const scudo::uptr Size =
        SizeClassMap::getSizeByClassId(ClassId) - scudo::Chunk::getHeaderSize();
    if (Size < 8)
      continue;
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin);
      memset(Ptrs[I], 0xaa, Size);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size - 8, Origin);
      memset(Ptrs[I], 0xbb, Size - 8);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size - 8);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      for (scudo::uptr J = 0; J < Size; ++J)
        ASSERT_EQ((reinterpret_cast<char *>(Ptrs[I]))[J], '\0');
    }
  }

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 0);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateInPlaceStress) {
  auto *Allocator = this->Allocator.get();

  // Regression test: make realloc-in-place happen at the very right end of a
  // mapped region.
  constexpr size_t nPtrs = 10000;
  for (scudo::uptr i = 1; i < 32; ++i) {
    scudo::uptr Size = 16 * i - 1;
    std::vector<void *> Ptrs;
    for (size_t i = 0; i < nPtrs; ++i) {
      void *P = Allocator->allocate(Size, Origin);
      P = Allocator->reallocate(P, Size + 1);
      Ptrs.push_back(P);
    }

    for (size_t i = 0; i < nPtrs; ++i)
      Allocator->deallocate(Ptrs[i], Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferDefaultDisabled) {
  // The RingBuffer is not initialized until tracking is enabled for the
  // first time.
  auto *Allocator = this->Allocator.get();
  EXPECT_EQ(0u, Allocator->getRingBufferSize());
  EXPECT_EQ(nullptr, Allocator->getRingBufferAddress());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferInitOnce) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto RingBufferSize = Allocator->getRingBufferSize();
  ASSERT_GT(RingBufferSize, 0u);
  auto *RingBufferAddress = Allocator->getRingBufferAddress();
  EXPECT_NE(nullptr, RingBufferAddress);

  // Enable tracking again to verify that the initialization only happens once.
  Allocator->setTrackAllocationStacks(true);
  ASSERT_EQ(RingBufferSize, Allocator->getRingBufferSize());
  EXPECT_EQ(RingBufferAddress, Allocator->getRingBufferAddress());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferSize) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto RingBufferSize = Allocator->getRingBufferSize();
  ASSERT_GT(RingBufferSize, 0u);
  EXPECT_EQ(Allocator->getRingBufferAddress()[RingBufferSize - 1], '\0');
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto *RingBufferAddress = Allocator->getRingBufferAddress();
  EXPECT_NE(RingBufferAddress, nullptr);
  EXPECT_EQ(RingBufferAddress, Allocator->getRingBufferAddress());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotDefaultDisabled) {
  // The StackDepot is not initialized until tracking is enabled for the
  // first time.
  auto *Allocator = this->Allocator.get();
  EXPECT_EQ(0u, Allocator->getStackDepotSize());
  EXPECT_EQ(nullptr, Allocator->getStackDepotAddress());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotInitOnce) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto StackDepotSize = Allocator->getStackDepotSize();
  EXPECT_GT(StackDepotSize, 0u);
  auto *StackDepotAddress = Allocator->getStackDepotAddress();
  EXPECT_NE(nullptr, StackDepotAddress);

  // Enable tracking again to verify that the initialization only happens once.
  Allocator->setTrackAllocationStacks(true);
  EXPECT_EQ(StackDepotSize, Allocator->getStackDepotSize());
  EXPECT_EQ(StackDepotAddress, Allocator->getStackDepotAddress());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotSize) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto StackDepotSize = Allocator->getStackDepotSize();
  EXPECT_GT(StackDepotSize, 0u);
  EXPECT_EQ(Allocator->getStackDepotAddress()[StackDepotSize - 1], '\0');
}

SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotAddress) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto *StackDepotAddress = Allocator->getStackDepotAddress();
  EXPECT_NE(StackDepotAddress, nullptr);
  EXPECT_EQ(StackDepotAddress, Allocator->getStackDepotAddress());
}

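// Exercise the StackDepot directly on a caller-provided buffer: validate the
// buffer size, insert a small stack trace, and read it back through find/at.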
SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepot) {
  alignas(scudo::StackDepot) char Buf[sizeof(scudo::StackDepot) +
                                      1024 * sizeof(scudo::atomic_u64) +
                                      1024 * sizeof(scudo::atomic_u32)] = {};
  auto *Depot = reinterpret_cast<scudo::StackDepot *>(Buf);
  Depot->init(1024, 1024);
  ASSERT_TRUE(Depot->isValid(sizeof(Buf)));
  ASSERT_FALSE(Depot->isValid(sizeof(Buf) - 1));
  scudo::uptr Stack[] = {1, 2, 3};
  scudo::u32 Elem = Depot->insert(&Stack[0], &Stack[3]);
  scudo::uptr RingPosPtr = 0;
  scudo::uptr SizePtr = 0;
  ASSERT_TRUE(Depot->find(Elem, &RingPosPtr, &SizePtr));
  ASSERT_EQ(SizePtr, 3u);
  EXPECT_EQ(Depot->at(RingPosPtr), 1u);
  EXPECT_EQ(Depot->at(RingPosPtr + 1), 2u);
  EXPECT_EQ(Depot->at(RingPosPtr + 2), 3u);
}

#if SCUDO_CAN_USE_PRIMARY64
#if SCUDO_TRUSTY

// TrustyConfig is designed for a domain-specific allocator. Add a basic test
// which covers only simple operations and ensures the configuration compiles.
TEST(ScudoCombinedTest, BasicTrustyConfig) {
  using AllocatorT = scudo::Allocator<scudo::TrustyConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  for (scudo::uptr ClassId = 1U;
       ClassId <= scudo::TrustyConfig::SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size =
        scudo::TrustyConfig::SizeClassMap::getSizeByClassId(ClassId);
    void *p = Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin);
    ASSERT_NE(p, nullptr);
    free(p);
  }

  bool UnlockRequired;
  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
      *Allocator->getTSDRegistry());
  TSD->getCache().drain();

  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

#endif
#endif

