| 1 | //===-- asan_interface_test.cpp -------------------------------------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | // This file is a part of AddressSanitizer, an address sanity checker. |
| 10 | // |
| 11 | //===----------------------------------------------------------------------===// |
| 12 | #include "asan_test_utils.h" |
| 13 | #include "sanitizer_common/sanitizer_internal_defs.h" |
| 14 | #include <sanitizer/allocator_interface.h> |
| 15 | #include <sanitizer/asan_interface.h> |
| 16 | #include <vector> |
| 17 | |
// __sanitizer_get_estimated_allocated_size() should behave as the identity
// for the sizes probed here: 0 maps to 0 and representative sizes map to
// themselves.
TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
  EXPECT_EQ(0U, __sanitizer_get_estimated_allocated_size(0));
  const size_t sizes[] = { 1, 30, 1<<30 };
  // Use the real element count instead of a hard-coded bound.
  for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
    EXPECT_EQ(sizes[i], __sanitizer_get_estimated_allocated_size(sizes[i]));
  }
}
| 25 | |
// Substring of the ASan report produced when __sanitizer_get_allocated_size
// is called on a pointer the allocator does not own; matched by the
// EXPECT_DEATH checks below.
static const char* kGetAllocatedSizeErrorMsg =
  "attempting to call __sanitizer_get_allocated_size";
| 28 | |
// Exercises __sanitizer_get_ownership() / __sanitizer_get_allocated_size()
// on live allocations, wild addresses, interior pointers, NULL, freed
// memory, and malloc(0).
TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by allocator. Allocated size should be
  // equal to requested size.
  EXPECT_EQ(true, __sanitizer_get_ownership(array));
  EXPECT_EQ(kArraySize, __sanitizer_get_allocated_size(array));
  EXPECT_EQ(true, __sanitizer_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __sanitizer_get_allocated_size(int_ptr));

  // We cannot call GetAllocatedSize from the memory we didn't map,
  // and from the interior pointers (not returned by previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_FALSE(__sanitizer_get_ownership(wild_addr));
  EXPECT_DEATH(__sanitizer_get_allocated_size(wild_addr),
               kGetAllocatedSizeErrorMsg);
  EXPECT_FALSE(__sanitizer_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__sanitizer_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for
  // __sanitizer_get_allocated_size().
  EXPECT_FALSE(__sanitizer_get_ownership(NULL));
  EXPECT_EQ(0U, __sanitizer_get_allocated_size(NULL));

  // When memory is freed, it's not owned, and call to GetAllocatedSize
  // is forbidden.
  free(array);
  EXPECT_FALSE(__sanitizer_get_ownership(array));
  EXPECT_DEATH(__sanitizer_get_allocated_size(array),
               kGetAllocatedSizeErrorMsg);
  delete int_ptr;

  void *zero_alloc = Ident(malloc(0));
  if (zero_alloc != 0) {
    // If malloc(0) is not null, this pointer is owned and should have valid
    // allocated size.
    EXPECT_TRUE(__sanitizer_get_ownership(zero_alloc));
    // Allocated size is 0 or 1 depending on the allocator used.
    EXPECT_LT(__sanitizer_get_allocated_size(zero_alloc), 2U);
  }
  free(zero_alloc);
}
| 74 | |
// The global "currently allocated" counter must go up by exactly the
// requested size on malloc and return to its previous value on free.
TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
  size_t before_malloc, after_malloc, after_free;
  char *array;
  const size_t kMallocSize = 100;
  before_malloc = __sanitizer_get_current_allocated_bytes();

  array = Ident((char*)malloc(kMallocSize));
  after_malloc = __sanitizer_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);

  free(array);
  after_free = __sanitizer_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc, after_free);
}
| 89 | |
// Huge chunks must be unmapped on free (not cached in the free list), so
// repeated large malloc/free cycles should leave the heap size unchanged.
TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  // ASan allocator does not keep huge chunks in free list, but unmaps them.
  // The chunk should be greater than the quarantine size,
  // otherwise it will be stuck in quarantine instead of being unmapped.
  static const size_t kLargeMallocSize = (1 << 28) + 1;  // 256M
  free(Ident(malloc(kLargeMallocSize)));  // Drain quarantine.
  size_t old_heap_size = __sanitizer_get_heap_size();
  for (int i = 0; i < 3; i++) {
    free(Ident(malloc(kLargeMallocSize)));
    EXPECT_EQ(old_heap_size, __sanitizer_get_heap_size());
  }
}
| 103 | |
| 104 | #if !defined(__NetBSD__) |
// Parameters for the ManyThreadsWithStatsStressTest below: a mix of small
// and medium allocation sizes, the per-thread iteration count, and the
// thread count (reduced on 32-bit targets to limit address-space usage).
static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<14, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads =
    (SANITIZER_WORDSIZE == 32) ? 40 : 200;
| 109 | |
| 110 | static void *ManyThreadsWithStatsWorker(void *arg) { |
| 111 | (void)arg; |
| 112 | for (size_t iter = 0; iter < kManyThreadsIterations; iter++) { |
| 113 | for (size_t size_index = 0; size_index < 4; size_index++) { |
| 114 | free(Ident(malloc(size: kManyThreadsMallocSizes[size_index]))); |
| 115 | } |
| 116 | } |
| 117 | // Just one large allocation. |
| 118 | free(Ident(malloc(size: 1 << 20))); |
| 119 | return 0; |
| 120 | } |
| 121 | |
// Hammers the allocator from many threads at once and checks that the
// global allocation-bytes counter ends up close to where it started.
TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
  pthread_t threads[kManyThreadsNumThreads];
  const size_t before_test = __sanitizer_get_current_allocated_bytes();
  for (size_t t = 0; t < kManyThreadsNumThreads; t++) {
    PTHREAD_CREATE(&threads[t], 0,
                   (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)t);
  }
  for (size_t t = 0; t < kManyThreadsNumThreads; t++) {
    PTHREAD_JOIN(threads[t], 0);
  }
  const size_t after_test = __sanitizer_get_current_allocated_bytes();
  // ASan stats also reflect memory usage of internal ASan RTL structs,
  // so we can't check for equality here.
  EXPECT_LT(after_test, before_test + (1UL<<20));
}
| 138 | #endif |
| 139 | |
| 140 | static void DoDoubleFree() { |
| 141 | int *x = Ident(new int); |
| 142 | delete Ident(x); |
| 143 | delete Ident(x); |
| 144 | } |
| 145 | |
// Death callback installed via __asan_set_death_callback(); prints a marker
// string that DeathCallbackTest matches with EXPECT_DEATH.
static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
  fflush(0);  // On Windows, stderr doesn't flush on crash.
}
| 150 | |
| 151 | TEST(AddressSanitizerInterface, DeathCallbackTest) { |
| 152 | __asan_set_death_callback(callback: MyDeathCallback); |
| 153 | EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback" ); |
| 154 | __asan_set_death_callback(NULL); |
| 155 | } |
| 156 | |
// Asserts that the byte at ptr + offset is addressable (not poisoned).
#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan_address_is_poisoned(ptr + offset))

// Asserts that the byte at ptr + offset is poisoned.
#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan_address_is_poisoned(ptr + offset))
| 162 | |
| 163 | static const char* kUseAfterPoisonErrorMessage = "use-after-poison" ; |
| 164 | |
| 165 | TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) { |
| 166 | char *array = Ident((char*)malloc(size: 120)); |
| 167 | // poison array[40..80) |
| 168 | __asan_poison_memory_region(addr: array + 40, size: 40); |
| 169 | GOOD_ACCESS(array, 39); |
| 170 | GOOD_ACCESS(array, 80); |
| 171 | BAD_ACCESS(array, 40); |
| 172 | BAD_ACCESS(array, 60); |
| 173 | BAD_ACCESS(array, 79); |
| 174 | EXPECT_DEATH(Ident(array[40]), kUseAfterPoisonErrorMessage); |
| 175 | __asan_unpoison_memory_region(addr: array + 40, size: 40); |
| 176 | // access previously poisoned memory. |
| 177 | GOOD_ACCESS(array, 40); |
| 178 | GOOD_ACCESS(array, 79); |
| 179 | free(ptr: array); |
| 180 | } |
| 181 | |
| 182 | TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) { |
| 183 | char *array = Ident((char*)malloc(size: 120)); |
| 184 | // Poison [0..40) and [80..120) |
| 185 | __asan_poison_memory_region(addr: array, size: 40); |
| 186 | __asan_poison_memory_region(addr: array + 80, size: 40); |
| 187 | BAD_ACCESS(array, 20); |
| 188 | GOOD_ACCESS(array, 60); |
| 189 | BAD_ACCESS(array, 100); |
| 190 | // Poison whole array - [0..120) |
| 191 | __asan_poison_memory_region(addr: array, size: 120); |
| 192 | BAD_ACCESS(array, 60); |
| 193 | // Unpoison [24..96) |
| 194 | __asan_unpoison_memory_region(addr: array + 24, size: 72); |
| 195 | BAD_ACCESS(array, 23); |
| 196 | GOOD_ACCESS(array, 24); |
| 197 | GOOD_ACCESS(array, 60); |
| 198 | GOOD_ACCESS(array, 95); |
| 199 | BAD_ACCESS(array, 96); |
| 200 | free(ptr: array); |
| 201 | } |
| 202 | |
| 203 | TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) { |
| 204 | // Vector of capacity 20 |
| 205 | char *vec = Ident((char*)malloc(size: 20)); |
| 206 | __asan_poison_memory_region(addr: vec, size: 20); |
| 207 | for (size_t i = 0; i < 7; i++) { |
| 208 | // Simulate push_back. |
| 209 | __asan_unpoison_memory_region(addr: vec + i, size: 1); |
| 210 | GOOD_ACCESS(vec, i); |
| 211 | BAD_ACCESS(vec, i + 1); |
| 212 | } |
| 213 | for (size_t i = 7; i > 0; i--) { |
| 214 | // Simulate pop_back. |
| 215 | __asan_poison_memory_region(addr: vec + i - 1, size: 1); |
| 216 | BAD_ACCESS(vec, i - 1); |
| 217 | if (i > 1) GOOD_ACCESS(vec, i - 2); |
| 218 | } |
| 219 | free(ptr: vec); |
| 220 | } |
| 221 | |
| 222 | #if !defined(ASAN_SHADOW_SCALE) || ASAN_SHADOW_SCALE == 3 |
// Normalize an expected-shadow array so it is representable by real ASan
// shadow encoding: within each aligned block of 2^granularity bytes,
// addressable ("true") bytes may only appear after all poisoned ("false")
// ones — i.e. no "true" before a "false". Any "true" preceding a "false"
// in the same block is cleared.
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  const int block_size = 1 << granularity;
  for (int start = 0; start < length; start += block_size) {
    int end = start + block_size;
    if (end > length) end = length;  // Final block may be partial.
    // Locate the highest poisoned entry in this block, if any.
    int last_false = -1;
    for (int i = start; i < end; i++) {
      if (!shadow[i]) last_false = i;
    }
    // Everything at or below it must be poisoned too.
    for (int i = start; i <= last_false; i++) {
      shadow[i] = false;
    }
  }
}
| 237 | |
| 238 | TEST(AddressSanitizerInterface, PoisoningStressTest) { |
| 239 | const size_t kSize = 24; |
| 240 | bool expected[kSize]; |
| 241 | char *arr = Ident((char*)malloc(size: kSize)); |
| 242 | for (size_t l1 = 0; l1 < kSize; l1++) { |
| 243 | for (size_t s1 = 1; l1 + s1 <= kSize; s1++) { |
| 244 | for (size_t l2 = 0; l2 < kSize; l2++) { |
| 245 | for (size_t s2 = 1; l2 + s2 <= kSize; s2++) { |
| 246 | // Poison [l1, l1+s1), [l2, l2+s2) and check result. |
| 247 | __asan_unpoison_memory_region(addr: arr, size: kSize); |
| 248 | __asan_poison_memory_region(addr: arr + l1, size: s1); |
| 249 | __asan_poison_memory_region(addr: arr + l2, size: s2); |
| 250 | memset(s: expected, c: false, n: kSize); |
| 251 | memset(s: expected + l1, c: true, n: s1); |
| 252 | MakeShadowValid(shadow: expected, length: kSize, /*granularity*/ 3); |
| 253 | memset(s: expected + l2, c: true, n: s2); |
| 254 | MakeShadowValid(shadow: expected, length: kSize, /*granularity*/ 3); |
| 255 | for (size_t i = 0; i < kSize; i++) { |
| 256 | ASSERT_EQ(expected[i], __asan_address_is_poisoned(addr: arr + i)); |
| 257 | } |
| 258 | // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result. |
| 259 | __asan_poison_memory_region(addr: arr, size: kSize); |
| 260 | __asan_unpoison_memory_region(addr: arr + l1, size: s1); |
| 261 | __asan_unpoison_memory_region(addr: arr + l2, size: s2); |
| 262 | memset(s: expected, c: true, n: kSize); |
| 263 | memset(s: expected + l1, c: false, n: s1); |
| 264 | MakeShadowValid(shadow: expected, length: kSize, /*granularity*/ 3); |
| 265 | memset(s: expected + l2, c: false, n: s2); |
| 266 | MakeShadowValid(shadow: expected, length: kSize, /*granularity*/ 3); |
| 267 | for (size_t i = 0; i < kSize; i++) { |
| 268 | ASSERT_EQ(expected[i], __asan_address_is_poisoned(addr: arr + i)); |
| 269 | } |
| 270 | } |
| 271 | } |
| 272 | } |
| 273 | } |
| 274 | free(ptr: arr); |
| 275 | } |
| 276 | #endif // !defined(ASAN_SHADOW_SCALE) || ASAN_SHADOW_SCALE == 3 |
| 277 | |
| 278 | TEST(AddressSanitizerInterface, GlobalRedzones) { |
| 279 | GOOD_ACCESS(glob1, 1 - 1); |
| 280 | GOOD_ACCESS(glob2, 2 - 1); |
| 281 | GOOD_ACCESS(glob3, 3 - 1); |
| 282 | GOOD_ACCESS(glob4, 4 - 1); |
| 283 | GOOD_ACCESS(glob5, 5 - 1); |
| 284 | GOOD_ACCESS(glob6, 6 - 1); |
| 285 | GOOD_ACCESS(glob7, 7 - 1); |
| 286 | GOOD_ACCESS(glob8, 8 - 1); |
| 287 | GOOD_ACCESS(glob9, 9 - 1); |
| 288 | GOOD_ACCESS(glob10, 10 - 1); |
| 289 | GOOD_ACCESS(glob11, 11 - 1); |
| 290 | GOOD_ACCESS(glob12, 12 - 1); |
| 291 | GOOD_ACCESS(glob13, 13 - 1); |
| 292 | GOOD_ACCESS(glob14, 14 - 1); |
| 293 | GOOD_ACCESS(glob15, 15 - 1); |
| 294 | GOOD_ACCESS(glob16, 16 - 1); |
| 295 | GOOD_ACCESS(glob17, 17 - 1); |
| 296 | GOOD_ACCESS(glob1000, 1000 - 1); |
| 297 | GOOD_ACCESS(glob10000, 10000 - 1); |
| 298 | GOOD_ACCESS(glob100000, 100000 - 1); |
| 299 | |
| 300 | BAD_ACCESS(glob1, 1); |
| 301 | BAD_ACCESS(glob2, 2); |
| 302 | BAD_ACCESS(glob3, 3); |
| 303 | BAD_ACCESS(glob4, 4); |
| 304 | BAD_ACCESS(glob5, 5); |
| 305 | BAD_ACCESS(glob6, 6); |
| 306 | BAD_ACCESS(glob7, 7); |
| 307 | BAD_ACCESS(glob8, 8); |
| 308 | BAD_ACCESS(glob9, 9); |
| 309 | BAD_ACCESS(glob10, 10); |
| 310 | BAD_ACCESS(glob11, 11); |
| 311 | BAD_ACCESS(glob12, 12); |
| 312 | BAD_ACCESS(glob13, 13); |
| 313 | BAD_ACCESS(glob14, 14); |
| 314 | BAD_ACCESS(glob15, 15); |
| 315 | BAD_ACCESS(glob16, 16); |
| 316 | BAD_ACCESS(glob17, 17); |
| 317 | BAD_ACCESS(glob1000, 1000); |
| 318 | BAD_ACCESS(glob1000, 1100); // Redzone is at least 101 bytes. |
| 319 | BAD_ACCESS(glob10000, 10000); |
| 320 | BAD_ACCESS(glob10000, 11000); // Redzone is at least 1001 bytes. |
| 321 | BAD_ACCESS(glob100000, 100000); |
| 322 | BAD_ACCESS(glob100000, 110000); // Redzone is at least 10001 bytes. |
| 323 | } |
| 324 | |
| 325 | TEST(AddressSanitizerInterface, PoisonedRegion) { |
| 326 | size_t rz = 16; |
| 327 | for (size_t size = 1; size <= 64; size++) { |
| 328 | char *p = new char[size]; |
| 329 | for (size_t beg = 0; beg < size + rz; beg++) { |
| 330 | for (size_t end = beg; end < size + rz; end++) { |
| 331 | void *first_poisoned = __asan_region_is_poisoned(beg: p + beg, size: end - beg); |
| 332 | if (beg == end) { |
| 333 | EXPECT_FALSE(first_poisoned); |
| 334 | } else if (beg < size && end <= size) { |
| 335 | EXPECT_FALSE(first_poisoned); |
| 336 | } else if (beg >= size) { |
| 337 | EXPECT_EQ(p + beg, first_poisoned); |
| 338 | } else { |
| 339 | EXPECT_GT(end, size); |
| 340 | EXPECT_EQ(p + size, first_poisoned); |
| 341 | } |
| 342 | } |
| 343 | } |
| 344 | delete [] p; |
| 345 | } |
| 346 | } |
| 347 | |
| 348 | // This is a performance benchmark for manual runs. |
| 349 | // asan's memset interceptor calls mem_is_zero for the entire shadow region. |
| 350 | // the profile should look like this: |
| 351 | // 89.10% [.] __memset_sse2 |
| 352 | // 10.50% [.] __sanitizer::mem_is_zero |
| 353 | // I.e. mem_is_zero should consume ~ SHADOW_GRANULARITY less CPU cycles |
| 354 | // than memset itself. |
| 355 | TEST(AddressSanitizerInterface, DISABLED_StressLargeMemset) { |
| 356 | size_t size = 1 << 20; |
| 357 | char *x = new char[size]; |
| 358 | for (int i = 0; i < 100000; i++) |
| 359 | Ident(memset)(x, 0, size); |
| 360 | delete [] x; |
| 361 | } |
| 362 | |
| 363 | // Same here, but we run memset with small sizes. |
| 364 | TEST(AddressSanitizerInterface, DISABLED_StressSmallMemset) { |
| 365 | size_t size = 32; |
| 366 | char *x = new char[size]; |
| 367 | for (int i = 0; i < 100000000; i++) |
| 368 | Ident(memset)(x, 0, size); |
| 369 | delete [] x; |
| 370 | } |
// Report types ASan emits when __asan_poison_memory_region /
// __asan_unpoison_memory_region are applied to unaddressable ranges;
// matched by the EXPECT_DEATH checks below.
static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";
| 373 | |
| 374 | TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) { |
| 375 | char *array = Ident((char*)malloc(size: 120)); |
| 376 | __asan_unpoison_memory_region(addr: array, size: 120); |
| 377 | // Try to unpoison not owned memory |
| 378 | EXPECT_DEATH(__asan_unpoison_memory_region(addr: array, size: 121), |
| 379 | kInvalidUnpoisonMessage); |
| 380 | EXPECT_DEATH(__asan_unpoison_memory_region(addr: array - 1, size: 120), |
| 381 | kInvalidUnpoisonMessage); |
| 382 | |
| 383 | __asan_poison_memory_region(addr: array, size: 120); |
| 384 | // Try to poison not owned memory. |
| 385 | EXPECT_DEATH(__asan_poison_memory_region(addr: array, size: 121), kInvalidPoisonMessage); |
| 386 | EXPECT_DEATH(__asan_poison_memory_region(addr: array - 1, size: 120), |
| 387 | kInvalidPoisonMessage); |
| 388 | free(ptr: array); |
| 389 | } |
| 390 | |
// Hot-path stress for __sanitizer_get_ownership() /
// __sanitizer_get_allocated_size(): many queries against a fixed set of
// live allocations, plus negative queries on stack and wild addresses.
TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
  std::vector<char *> pointers;
  std::vector<size_t> sizes;
  const size_t kNumMallocs = 1 << 9;
  for (size_t i = 0; i < kNumMallocs; i++) {
    size_t size = i * 100 + 1;
    pointers.push_back((char*)malloc(size));
    sizes.push_back(size);
  }
  for (size_t i = 0; i < 4000000; i++) {
    // A stack address and a wild address are never allocator-owned.
    EXPECT_FALSE(__sanitizer_get_ownership(&pointers));
    EXPECT_FALSE(__sanitizer_get_ownership((void*)0x1234));
    size_t idx = i % kNumMallocs;
    EXPECT_TRUE(__sanitizer_get_ownership(pointers[idx]));
    EXPECT_EQ(sizes[idx], __sanitizer_get_allocated_size(pointers[idx]));
  }
  for (size_t i = 0, n = pointers.size(); i < n; i++)
    free(pointers[i]);
}
| 410 | |
| 411 | TEST(AddressSanitizerInterface, HandleNoReturnTest) { |
| 412 | char array[40]; |
| 413 | __asan_poison_memory_region(addr: array, size: sizeof(array)); |
| 414 | BAD_ACCESS(array, 20); |
| 415 | __asan_handle_no_return(); |
| 416 | // Fake stack does not need to be unpoisoned. |
| 417 | if (__asan_get_current_fake_stack()) |
| 418 | return; |
| 419 | // It unpoisons the whole thread stack. |
| 420 | GOOD_ACCESS(array, 20); |
| 421 | } |
| 422 | |