//===-- asan_noinst_test.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled without ASan instrumentation.
//===----------------------------------------------------------------------===//

#include <assert.h>
#include <sanitizer/allocator_interface.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()

#include <algorithm>
#include <limits>
#include <vector>

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_test_utils.h"

using namespace __sanitizer;

// ATTENTION!
// Please don't call intercepted functions (including malloc() and friends)
// in this test. The static runtime library is linked explicitly (without
// -fsanitize=address), thus the interceptors do not work correctly on OS X.

// Make sure __asan_init is called before any test case is run.
struct AsanInitCaller {
  AsanInitCaller() {
    __asan_init();
  }
};
static AsanInitCaller asan_init_caller;

TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

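// Exercise the allocator with a pseudo-random workload: roughly a third of
// the iterations free a randomly chosen live chunk; the rest allocate
// 1..1000 bytes (occasionally bumped by 1-4 KiB) with a random power-of-two
// alignment between 2 and 1024 and touch the first, middle, and last bytes.
// Whatever is still live at the end is freed in one pass.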
static void *MallocStress(void *NumOfItrPtr) {
  size_t n = *((size_t *)NumOfItrPtr);
  u32 seed = my_rand();
  BufferedStackTrace stack1;
  stack1.trace_buffer[0] = 0xa123;
  stack1.trace_buffer[1] = 0xa456;
  stack1.size = 2;

  BufferedStackTrace stack2;
  stack2.trace_buffer[0] = 0xb123;
  stack2.trace_buffer[1] = 0xb456;
  stack2.size = 2;

  BufferedStackTrace stack3;
  stack3.trace_buffer[0] = 0xc123;
  stack3.trace_buffer[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      size_t idx = my_rand_r(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
    } else {
      size_t size = my_rand_r(&seed) % 1000 + 1;
      switch ((my_rand_r(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size,
                                               &stack2, __asan::FROM_MALLOC);
      EXPECT_EQ(size, __asan::asan_malloc_usable_size(ptr, 0, 0));
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
  return nullptr;
}

TEST(AddressSanitizer, NoInstMallocTest) {
  const size_t kNumIterations = (ASAN_LOW_MEMORY) ? 300000 : 1000000;
  MallocStress((void *)&kNumIterations);
}

TEST(AddressSanitizer, ThreadedMallocStressTest) {
  const int kNumThreads = 4;
  const size_t kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, (void *(*)(void *x))MallocStress,
                   (void *)&kNumIterations);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

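// Dump the shadow bytes for the 32 bytes before and after [ptr, ptr + size),
// marking the region boundaries with '.' and printing each shadow byte once
// (the shadow address only advances every SHADOW_GRANULARITY app bytes).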
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

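// Freed chunks are held in the quarantine before they become reusable, so
// after freeing `p` it should take many allocate/free cycles of the same
// size (enough to push `p` through the quarantine) before the allocator
// hands back the same address, but not an unbounded number of them.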
TEST(AddressSanitizer, QuarantineTest) {
  BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;

  const int size = 1024;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
    if (p1 == p) break;
  }
  EXPECT_GE(i, 10000U);
  EXPECT_LT(i, max_i);
}

#if !defined(__NetBSD__)
void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand();
  BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  }
  return NULL;
}

// Check that the thread local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  // Run the routine once to warm up ASAN internal structures to get more
  // predictable incremental memory changes.
  pthread_t t;
  PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
  PTHREAD_JOIN(t, 0);

  const int n_threads = 3000;
  size_t mmaped1 = __sanitizer_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
    size_t mmaped2 = __sanitizer_get_heap_size();
    // Figure out why this much memory is required.
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}
#endif

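// Hammer a single size class: each iteration allocates a batch of 1000
// 32-byte chunks and then frees them all, stressing the per-thread caches.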
void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

TEST(AddressSanitizer, ShadowRegionIsPoisonedTest) {
  using __asan::kHighMemEnd;
  // Check that __asan_region_is_poisoned works for shadow regions.
  uptr ptr = kLowShadowBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
  ptr = kShadowGapBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
  ptr = kHighShadowBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
}

// Test __asan_load1 & friends.
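// While __asan_test_only_reported_buggy_pointer is non-null, the runtime
// records the faulting address there instead of aborting with a report,
// which lets the test probe accesses that run off the end of an allocation:
// for each size-aligned offset, an access that fits entirely inside the
// chunk must report nothing, and one that crosses the end must report the
// exact address that was passed in.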
typedef void (*CB)(uptr p);
static void TestLoadStoreCallbacks(CB cb[2][5]) {
  uptr buggy_ptr;

  __asan_test_only_reported_buggy_pointer = &buggy_ptr;
  BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;

  for (uptr len = 16; len <= 32; len++) {
    char *ptr = (char*) __asan::asan_malloc(len, &stack);
    uptr p = reinterpret_cast<uptr>(ptr);
    for (uptr is_write = 0; is_write <= 1; is_write++) {
      for (uptr size_log = 0; size_log <= 4; size_log++) {
        uptr size = 1 << size_log;
        CB call = cb[is_write][size_log];
        // Iterate only size-aligned offsets.
        for (uptr offset = 0; offset <= len; offset += size) {
          buggy_ptr = 0;
          call(p + offset);
          if (offset + size <= len)
            EXPECT_EQ(buggy_ptr, 0U);
          else
            EXPECT_EQ(buggy_ptr, p + offset);
        }
      }
    }
    __asan::asan_free(ptr, &stack, __asan::FROM_MALLOC);
  }
  __asan_test_only_reported_buggy_pointer = 0;
}

TEST(AddressSanitizer, LoadStoreCallbacks) {
  CB cb[2][5] = {{
                     __asan_load1,
                     __asan_load2,
                     __asan_load4,
                     __asan_load8,
                     __asan_load16,
                 },
                 {
                     __asan_store1,
                     __asan_store2,
                     __asan_store4,
                     __asan_store8,
                     __asan_store16,
                 }};
  TestLoadStoreCallbacks(cb);
}

#if defined(__x86_64__) && \
    !(defined(SANITIZER_APPLE) || defined(SANITIZER_WINDOWS))
// clang-format off

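// The register-specific __asan_check_(load|store)_add_(size)_(reg) entry
// points take the address to check in the register named in the symbol
// rather than in the usual first-argument register, so each wrapper saves
// that register, loads the address into it, makes the call, and restores it.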
#define CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(s, reg, op)      \
  void CallAsanMemoryAccessAdd##reg##op##s(uptr address) {    \
    asm("push %%" #reg " \n"                                  \
        "mov %[x], %%" #reg " \n"                             \
        "call __asan_check_" #op "_add_" #s "_" #reg "\n"     \
        "pop %%" #reg " \n"                                   \
        :                                                     \
        : [x] "r"(address)                                    \
        : "r8", "rdi");                                       \
  }

#define TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(reg)            \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(1, reg, load)          \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(1, reg, store)         \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(2, reg, load)          \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(2, reg, store)         \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(4, reg, load)          \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(4, reg, store)         \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(8, reg, load)          \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(8, reg, store)         \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(16, reg, load)         \
  CALL_ASAN_MEMORY_ACCESS_CALLBACK_ADD(16, reg, store)        \
                                                              \
  TEST(AddressSanitizer, LoadStoreCallbacksAddX86##reg) {     \
    CB cb[2][5] = {{                                          \
                       CallAsanMemoryAccessAdd##reg##load1,   \
                       CallAsanMemoryAccessAdd##reg##load2,   \
                       CallAsanMemoryAccessAdd##reg##load4,   \
                       CallAsanMemoryAccessAdd##reg##load8,   \
                       CallAsanMemoryAccessAdd##reg##load16,  \
                   },                                         \
                   {                                          \
                       CallAsanMemoryAccessAdd##reg##store1,  \
                       CallAsanMemoryAccessAdd##reg##store2,  \
                       CallAsanMemoryAccessAdd##reg##store4,  \
                       CallAsanMemoryAccessAdd##reg##store8,  \
                       CallAsanMemoryAccessAdd##reg##store16, \
                   }};                                        \
    TestLoadStoreCallbacks(cb);                               \
  }

// Instantiate all but R10 and R11 callbacks. We are using PLTSafe class with
// the intrinsic, which guarantees that the code generation will never emit
// R10 or R11 callbacks.
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RAX)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RBX)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RCX)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RDX)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RSI)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RDI)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RBP)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R8)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R9)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R12)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R13)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R14)
TEST_ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R15)

// clang-format on
#endif