//===-- tsd_test.cpp --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

| 9 | #include "tests/scudo_unit_test.h" |
| 10 | |
| 11 | #include "tsd_exclusive.h" |
| 12 | #include "tsd_shared.h" |
| 13 | |
| 14 | #include <stdlib.h> |
| 15 | |
| 16 | #include <condition_variable> |
| 17 | #include <mutex> |
| 18 | #include <set> |
| 19 | #include <thread> |
| 20 | #include <type_traits> |
| 21 | |
// We mock out an allocator with a TSD registry, mostly using empty stubs. The
// cache contains a single volatile uptr so that we can verify that concurrent
// threads never access or modify the same cache at the same time.
template <class Config> class MockAllocator {
public:
  using ThisT = MockAllocator<Config>;
  using TSDRegistryT = typename Config::template TSDRegistryT<ThisT>;
  using SizeClassAllocatorT = struct MockSizeClassAllocator {
    volatile scudo::uptr Canary;
  };
  using QuarantineCacheT = struct MockQuarantine {};

  void init() {
    // This should only be called once by the registry.
    EXPECT_FALSE(Initialized);
    Initialized = true;
  }

  void unmapTestOnly() { TSDRegistry.unmapTestOnly(this); }
  void initAllocator(SizeClassAllocatorT *SizeClassAllocator) {
    *SizeClassAllocator = {};
  }
  void commitBack(UNUSED scudo::TSD<MockAllocator> *TSD) {}
  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
  void callPostInitCallback() {}

  bool isInitialized() { return Initialized; }

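  // Presumably, operator new is overridden because TSDRegistryT may be
  // over-aligned (e.g. cache-line aligned), which the default operator new
  // does not guarantee beyond alignof(max_align_t); posix_memalign honors
  // alignof(ThisT) explicitly.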
  void *operator new(size_t Size) {
    void *P = nullptr;
    EXPECT_EQ(0, posix_memalign(&P, alignof(ThisT), Size));
    return P;
  }
  void operator delete(void *P) { free(P); }

private:
  bool Initialized = false;
  TSDRegistryT TSDRegistry;
};

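// The template parameters of scudo::TSDRegistrySharedT below are, in order,
// the allocator type, the size of the TSD pool, and the default number of
// TSDs handed out before Option::MaxTSDsCount raises it.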
struct OneCache {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 1U, 1U>;
};

struct SharedCaches {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 16U, 8U>;
};

struct ExclusiveCaches {
  template <class Allocator>
  using TSDRegistryT = scudo::TSDRegistryExT<Allocator>;
};

TEST(ScudoTSDTest, TSDRegistryInit) {
  using AllocatorT = MockAllocator<OneCache>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  EXPECT_FALSE(Allocator->isInitialized());

  auto Registry = Allocator->getTSDRegistry();
  Registry->initOnceMaybe(Allocator.get());
  EXPECT_TRUE(Allocator->isInitialized());
}

template <class AllocatorT>
static void testRegistry() NO_THREAD_SAFETY_ANALYSIS {
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  EXPECT_FALSE(Allocator->isInitialized());

  auto Registry = Allocator->getTSDRegistry();
  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/true);
  EXPECT_TRUE(Allocator->isInitialized());

  {
    typename AllocatorT::TSDRegistryT::ScopedTSD TSD(*Registry);
    EXPECT_EQ(TSD->getSizeClassAllocator().Canary, 0U);
  }

  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
  {
    typename AllocatorT::TSDRegistryT::ScopedTSD TSD(*Registry);
    EXPECT_EQ(TSD->getSizeClassAllocator().Canary, 0U);
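    // Scribble over the whole cache; no check follows, so this mainly
    // exercises a full write to the TSD-owned cache region.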
    memset(&TSD->getSizeClassAllocator(), 0x42,
           sizeof(TSD->getSizeClassAllocator()));
  }
}

TEST(ScudoTSDTest, TSDRegistryBasic) {
  testRegistry<MockAllocator<OneCache>>();
  testRegistry<MockAllocator<SharedCaches>>();
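  // Exclusive TSDs are skipped on Fuchsia, which only uses the shared
  // registry.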
#if !SCUDO_FUCHSIA
  testRegistry<MockAllocator<ExclusiveCaches>>();
#endif
}

static std::mutex Mutex;
static std::condition_variable Cv;
static bool Ready;

// Accessing `TSD->getSizeClassAllocator()` requires holding `TSD::Mutex`,
// which is not easy to express through thread-safety analysis. Instead, we
// verify thread safety with a runtime check in ScopedTSD and mark the test
// body with NO_THREAD_SAFETY_ANALYSIS.
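//
// For context, a ScopedTSD is roughly an RAII helper: its constructor obtains
// a TSD from the registry and locks it, operator-> accesses the locked TSD,
// and its destructor releases the lock.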
template <typename AllocatorT>
static void stressCache(AllocatorT *Allocator) NO_THREAD_SAFETY_ANALYSIS {
  auto Registry = Allocator->getTSDRegistry();
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(*Registry);
  // For an exclusive TSD, the cache should be empty: this thread just got its
  // own freshly initialized TSD. We cannot guarantee the same for a shared
  // TSD, which may already have been used by another thread.
  if (std::is_same<typename AllocatorT::TSDRegistryT,
                   scudo::TSDRegistryExT<AllocatorT>>()) {
    EXPECT_EQ(TSD->getSizeClassAllocator().Canary, 0U);
  }
  // Transform the thread id to a uptr to use it as the canary.
  const scudo::uptr Canary = static_cast<scudo::uptr>(
      std::hash<std::thread::id>{}(std::this_thread::get_id()));
  TSD->getSizeClassAllocator().Canary = Canary;
  // Loop a few times to make sure that a concurrent thread isn't modifying it.
  for (scudo::uptr I = 0; I < 4096U; I++)
    EXPECT_EQ(TSD->getSizeClassAllocator().Canary, Canary);
}

template <class AllocatorT> static void testRegistryThreaded() {
  Ready = false;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressCache<AllocatorT>, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
}

TEST(ScudoTSDTest, TSDRegistryThreaded) {
  testRegistryThreaded<MockAllocator<OneCache>>();
  testRegistryThreaded<MockAllocator<SharedCaches>>();
#if !SCUDO_FUCHSIA
  testRegistryThreaded<MockAllocator<ExclusiveCaches>>();
#endif
}

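// Guarded by Mutex: collects the distinct TSD addresses observed across all
// threads.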
static std::set<void *> Pointers;

static void stressSharedRegistry(MockAllocator<SharedCaches> *Allocator) {
  std::set<void *> Set;
  auto Registry = Allocator->getTSDRegistry();
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
  for (scudo::uptr I = 0; I < 4096U; I++) {
    typename MockAllocator<SharedCaches>::TSDRegistryT::ScopedTSD TSD(
        *Registry);
    Set.insert(reinterpret_cast<void *>(&*TSD));
  }
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Pointers.insert(Set.begin(), Set.end());
  }
}

TEST(ScudoTSDTest, TSDRegistryTSDsCount) {
  Ready = false;
  Pointers.clear();
  using AllocatorT = MockAllocator<SharedCaches>;
  auto Deleter = [](AllocatorT *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
                                                           Deleter);
  // We attempt to use as many TSDs as the shared registry offers by creating
  // a decent number of threads that will run concurrently and attempt to get
  // and lock TSDs. We put them all in a set and count the number of entries
  // after we are done.
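  // With 32 threads and at most 16 TSDs, some TSDs are necessarily contended
  // and shared between threads.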
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressSharedRegistry, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  // The initial number of TSDs we get will be the minimum of the default count
  // and the number of CPUs.
  EXPECT_LE(Pointers.size(), 8U);
  Pointers.clear();
  auto Registry = Allocator->getTSDRegistry();
  // Increase the number of TSDs to 16.
  Registry->setOption(scudo::Option::MaxTSDsCount, 16);
  Ready = false;
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(stressSharedRegistry, Allocator.get());
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  // We should get 16 distinct TSDs back.
  EXPECT_EQ(Pointers.size(), 16U);
}
