//===-- sanitizer_common_test.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//
#include <algorithm>
#include <cstring>
#include <string>
#include <vector>

// This ensures that including both internal sanitizer_common headers
// and the interface headers does not lead to compilation failures.
// Both may be included in unit tests, where googletest transitively
// pulls in sanitizer interface headers.
// The headers are specifically included using relative paths,
// because a compiler may use a different mismatching version
// of sanitizer headers.
#include "../../../include/sanitizer/asan_interface.h"
#include "../../../include/sanitizer/msan_interface.h"
#include "../../../include/sanitizer/tsan_interface.h"
#include "gtest/gtest.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_pthread_wrappers.h"
32 | |
namespace __sanitizer {

35 | static bool IsSorted(const uptr *array, uptr n) { |
36 | for (uptr i = 1; i < n; i++) { |
37 | if (array[i] < array[i - 1]) return false; |
38 | } |
39 | return true; |
40 | } |
41 | |
42 | TEST(SanitizerCommon, SortTest) { |
43 | uptr array[100]; |
44 | uptr n = 100; |
45 | // Already sorted. |
46 | for (uptr i = 0; i < n; i++) { |
47 | array[i] = i; |
48 | } |
49 | Sort(v: array, size: n); |
50 | EXPECT_TRUE(IsSorted(array, n)); |
51 | // Reverse order. |
52 | for (uptr i = 0; i < n; i++) { |
53 | array[i] = n - 1 - i; |
54 | } |
55 | Sort(v: array, size: n); |
56 | EXPECT_TRUE(IsSorted(array, n)); |
57 | // Mixed order. |
58 | for (uptr i = 0; i < n; i++) { |
59 | array[i] = (i % 2 == 0) ? i : n - 1 - i; |
60 | } |
61 | Sort(v: array, size: n); |
62 | EXPECT_TRUE(IsSorted(array, n)); |
63 | // All equal. |
64 | for (uptr i = 0; i < n; i++) { |
65 | array[i] = 42; |
66 | } |
67 | Sort(v: array, size: n); |
68 | EXPECT_TRUE(IsSorted(array, n)); |
69 | // All but one sorted. |
70 | for (uptr i = 0; i < n - 1; i++) { |
71 | array[i] = i; |
72 | } |
73 | array[n - 1] = 42; |
74 | Sort(v: array, size: n); |
75 | EXPECT_TRUE(IsSorted(array, n)); |
76 | // Minimal case - sort three elements. |
77 | array[0] = 1; |
78 | array[1] = 0; |
79 | Sort(v: array, size: 2); |
80 | EXPECT_TRUE(IsSorted(array, n: 2)); |
81 | } |
82 | |
83 | TEST(SanitizerCommon, MmapAlignedOrDieOnFatalError) { |
84 | uptr PageSize = GetPageSizeCached(); |
85 | for (uptr size = 1; size <= 32; size *= 2) { |
86 | for (uptr alignment = 1; alignment <= 32; alignment *= 2) { |
87 | for (int iter = 0; iter < 100; iter++) { |
88 | uptr res = (uptr)MmapAlignedOrDieOnFatalError( |
89 | size * PageSize, alignment * PageSize, "MmapAlignedOrDieTest" ); |
90 | EXPECT_EQ(0U, res % (alignment * PageSize)); |
91 | internal_memset((void*)res, 1, size * PageSize); |
92 | UnmapOrDie((void*)res, size * PageSize); |
93 | } |
94 | } |
95 | } |
96 | } |
97 | |
98 | TEST(SanitizerCommon, Mprotect) { |
99 | uptr PageSize = GetPageSizeCached(); |
100 | u8 *mem = reinterpret_cast<u8 *>(MmapOrDie(size: PageSize, mem_type: "MprotectTest" )); |
101 | for (u8 *p = mem; p < mem + PageSize; ++p) ++(*p); |
102 | |
103 | MprotectReadOnly(addr: reinterpret_cast<uptr>(mem), size: PageSize); |
104 | for (u8 *p = mem; p < mem + PageSize; ++p) EXPECT_EQ(1u, *p); |
105 | EXPECT_DEATH(++mem[0], "" ); |
106 | EXPECT_DEATH(++mem[PageSize / 2], "" ); |
107 | EXPECT_DEATH(++mem[PageSize - 1], "" ); |
108 | |
109 | MprotectNoAccess(addr: reinterpret_cast<uptr>(mem), size: PageSize); |
110 | volatile u8 t; |
111 | (void)t; |
112 | EXPECT_DEATH(t = mem[0], "" ); |
113 | EXPECT_DEATH(t = mem[PageSize / 2], "" ); |
114 | EXPECT_DEATH(t = mem[PageSize - 1], "" ); |
115 | } |
116 | |
117 | TEST(SanitizerCommon, InternalMmapVectorRoundUpCapacity) { |
118 | InternalMmapVector<uptr> v; |
119 | v.reserve(new_size: 1); |
120 | CHECK_EQ(v.capacity(), GetPageSizeCached() / sizeof(uptr)); |
121 | } |
122 | |
123 | TEST(SanitizerCommon, InternalMmapVectorReize) { |
124 | InternalMmapVector<uptr> v; |
125 | CHECK_EQ(0U, v.size()); |
126 | CHECK_GE(v.capacity(), v.size()); |
127 | |
128 | v.reserve(new_size: 1000); |
129 | CHECK_EQ(0U, v.size()); |
130 | CHECK_GE(v.capacity(), 1000U); |
131 | |
132 | v.resize(new_size: 10000); |
133 | CHECK_EQ(10000U, v.size()); |
134 | CHECK_GE(v.capacity(), v.size()); |
135 | uptr cap = v.capacity(); |
136 | |
137 | v.resize(new_size: 100); |
138 | CHECK_EQ(100U, v.size()); |
139 | CHECK_EQ(v.capacity(), cap); |
140 | |
141 | v.reserve(new_size: 10); |
142 | CHECK_EQ(100U, v.size()); |
143 | CHECK_EQ(v.capacity(), cap); |
144 | } |
145 | |
146 | TEST(SanitizerCommon, InternalMmapVector) { |
147 | InternalMmapVector<uptr> vector; |
148 | for (uptr i = 0; i < 100; i++) { |
149 | EXPECT_EQ(i, vector.size()); |
150 | vector.push_back(element: i); |
151 | } |
152 | for (uptr i = 0; i < 100; i++) { |
153 | EXPECT_EQ(i, vector[i]); |
154 | } |
155 | for (int i = 99; i >= 0; i--) { |
156 | EXPECT_EQ((uptr)i, vector.back()); |
157 | vector.pop_back(); |
158 | EXPECT_EQ((uptr)i, vector.size()); |
159 | } |
160 | InternalMmapVector<uptr> empty_vector; |
161 | CHECK_EQ(empty_vector.capacity(), 0U); |
162 | CHECK_EQ(0U, empty_vector.size()); |
163 | } |
164 | |
165 | TEST(SanitizerCommon, InternalMmapVectorEq) { |
166 | InternalMmapVector<uptr> vector1; |
167 | InternalMmapVector<uptr> vector2; |
168 | for (uptr i = 0; i < 100; i++) { |
169 | vector1.push_back(element: i); |
170 | vector2.push_back(element: i); |
171 | } |
172 | EXPECT_TRUE(vector1 == vector2); |
173 | EXPECT_FALSE(vector1 != vector2); |
174 | |
175 | vector1.push_back(element: 1); |
176 | EXPECT_FALSE(vector1 == vector2); |
177 | EXPECT_TRUE(vector1 != vector2); |
178 | |
179 | vector2.push_back(element: 1); |
180 | EXPECT_TRUE(vector1 == vector2); |
181 | EXPECT_FALSE(vector1 != vector2); |
182 | |
183 | vector1[55] = 1; |
184 | EXPECT_FALSE(vector1 == vector2); |
185 | EXPECT_TRUE(vector1 != vector2); |
186 | } |
187 | |
188 | TEST(SanitizerCommon, InternalMmapVectorSwap) { |
189 | InternalMmapVector<uptr> vector1; |
190 | InternalMmapVector<uptr> vector2; |
191 | InternalMmapVector<uptr> vector3; |
192 | InternalMmapVector<uptr> vector4; |
193 | for (uptr i = 0; i < 100; i++) { |
194 | vector1.push_back(element: i); |
195 | vector2.push_back(element: i); |
196 | vector3.push_back(element: -i); |
197 | vector4.push_back(element: -i); |
198 | } |
199 | EXPECT_NE(vector2, vector3); |
200 | EXPECT_NE(vector1, vector4); |
201 | vector1.swap(other&: vector3); |
202 | EXPECT_EQ(vector2, vector3); |
203 | EXPECT_EQ(vector1, vector4); |
204 | } |
205 | |
206 | void TestThreadInfo(bool main) { |
207 | uptr stk_addr = 0; |
208 | uptr stk_size = 0; |
209 | uptr tls_addr = 0; |
210 | uptr tls_size = 0; |
211 | GetThreadStackAndTls(main, stk_addr: &stk_addr, stk_size: &stk_size, tls_addr: &tls_addr, tls_size: &tls_size); |
212 | |
213 | int stack_var; |
214 | EXPECT_NE(stk_addr, (uptr)0); |
215 | EXPECT_NE(stk_size, (uptr)0); |
216 | EXPECT_GT((uptr)&stack_var, stk_addr); |
217 | EXPECT_LT((uptr)&stack_var, stk_addr + stk_size); |
218 | |
219 | #if SANITIZER_LINUX && defined(__x86_64__) |
220 | static __thread int thread_var; |
221 | EXPECT_NE(tls_addr, (uptr)0); |
222 | EXPECT_NE(tls_size, (uptr)0); |
223 | EXPECT_GT((uptr)&thread_var, tls_addr); |
224 | EXPECT_LT((uptr)&thread_var, tls_addr + tls_size); |
225 | |
226 | // Ensure that tls and stack do not intersect. |
227 | uptr tls_end = tls_addr + tls_size; |
228 | EXPECT_TRUE(tls_addr < stk_addr || tls_addr >= stk_addr + stk_size); |
229 | EXPECT_TRUE(tls_end < stk_addr || tls_end >= stk_addr + stk_size); |
230 | EXPECT_TRUE((tls_addr < stk_addr) == (tls_end < stk_addr)); |
231 | #endif |
232 | } |
233 | |
234 | static void *WorkerThread(void *arg) { |
235 | TestThreadInfo(main: false); |
236 | return 0; |
237 | } |
238 | |
239 | TEST(SanitizerCommon, ThreadStackTlsMain) { |
240 | InitTlsSize(); |
241 | TestThreadInfo(main: true); |
242 | } |
243 | |
244 | TEST(SanitizerCommon, ThreadStackTlsWorker) { |
245 | InitTlsSize(); |
246 | pthread_t t; |
247 | PTHREAD_CREATE(&t, 0, WorkerThread, 0); |
248 | PTHREAD_JOIN(t, 0); |
249 | } |
250 | |
251 | bool UptrLess(uptr a, uptr b) { |
252 | return a < b; |
253 | } |
254 | |
255 | TEST(SanitizerCommon, InternalLowerBound) { |
256 | std::vector<int> arr = {1, 3, 5, 7, 11}; |
257 | |
258 | EXPECT_EQ(0u, InternalLowerBound(arr, 0)); |
259 | EXPECT_EQ(0u, InternalLowerBound(arr, 1)); |
260 | EXPECT_EQ(1u, InternalLowerBound(arr, 2)); |
261 | EXPECT_EQ(1u, InternalLowerBound(arr, 3)); |
262 | EXPECT_EQ(2u, InternalLowerBound(arr, 4)); |
263 | EXPECT_EQ(2u, InternalLowerBound(arr, 5)); |
264 | EXPECT_EQ(3u, InternalLowerBound(arr, 6)); |
265 | EXPECT_EQ(3u, InternalLowerBound(arr, 7)); |
266 | EXPECT_EQ(4u, InternalLowerBound(arr, 8)); |
267 | EXPECT_EQ(4u, InternalLowerBound(arr, 9)); |
268 | EXPECT_EQ(4u, InternalLowerBound(arr, 10)); |
269 | EXPECT_EQ(4u, InternalLowerBound(arr, 11)); |
270 | EXPECT_EQ(5u, InternalLowerBound(arr, 12)); |
271 | } |
272 | |
273 | TEST(SanitizerCommon, InternalLowerBoundVsStdLowerBound) { |
274 | std::vector<int> data; |
275 | auto create_item = [] (size_t i, size_t j) { |
276 | auto v = i * 10000 + j; |
277 | return ((v << 6) + (v >> 6) + 0x9e3779b9) % 100; |
278 | }; |
279 | for (size_t i = 0; i < 1000; ++i) { |
280 | data.resize(i); |
281 | for (size_t j = 0; j < i; ++j) { |
282 | data[j] = create_item(i, j); |
283 | } |
284 | |
285 | std::sort(data.begin(), data.end()); |
286 | |
287 | for (size_t j = 0; j < i; ++j) { |
288 | int val = create_item(i, j); |
289 | for (auto to_find : {val - 1, val, val + 1}) { |
290 | uptr expected = |
291 | std::lower_bound(data.begin(), data.end(), to_find) - data.begin(); |
292 | EXPECT_EQ(expected, |
293 | InternalLowerBound(data, to_find, std::less<int>())); |
294 | } |
295 | } |
296 | } |
297 | } |
298 | |
299 | class SortAndDedupTest : public ::testing::TestWithParam<std::vector<int>> {}; |
300 | |
301 | TEST_P(SortAndDedupTest, SortAndDedup) { |
302 | std::vector<int> v_std = GetParam(); |
303 | std::sort(v_std.begin(), v_std.end()); |
304 | v_std.erase(std::unique(v_std.begin(), v_std.end()), v_std.end()); |
305 | |
306 | std::vector<int> v = GetParam(); |
307 | SortAndDedup(v); |
308 | |
309 | EXPECT_EQ(v_std, v); |
310 | } |
311 | |
312 | const std::vector<int> kSortAndDedupTests[] = { |
313 | {}, |
314 | {1}, |
315 | {1, 1}, |
316 | {1, 1, 1}, |
317 | {1, 2, 3}, |
318 | {3, 2, 1}, |
319 | {1, 2, 2, 3}, |
320 | {3, 3, 2, 1, 2}, |
321 | {3, 3, 2, 1, 2}, |
322 | {1, 2, 1, 1, 2, 1, 1, 1, 2, 2}, |
323 | {1, 3, 3, 2, 3, 1, 3, 1, 4, 4, 2, 1, 4, 1, 1, 2, 2}, |
324 | }; |
325 | INSTANTIATE_TEST_SUITE_P(SortAndDedupTest, SortAndDedupTest, |
326 | ::testing::ValuesIn(kSortAndDedupTests)); |
327 | |
#if SANITIZER_LINUX && !SANITIZER_ANDROID
// FindPathToBinary resolves names against PATH; an unknown name yields null.
TEST(SanitizerCommon, FindPathToBinary) {
  char *true_path = FindPathToBinary("true");
  EXPECT_NE((char*)0, internal_strstr(true_path, "/bin/true"));
  InternalFree(true_path);
  EXPECT_EQ(0, FindPathToBinary("unexisting_binary.ergjeorj"));
}
#elif SANITIZER_WINDOWS
TEST(SanitizerCommon, FindPathToBinary) {
  // ntdll.dll should be on PATH in all supported test environments on all
  // supported Windows versions.
  char *ntdll_path = FindPathToBinary("ntdll.dll");
  EXPECT_NE((char*)0, internal_strstr(ntdll_path, "ntdll.dll"));
  InternalFree(ntdll_path);
  EXPECT_EQ(0, FindPathToBinary("unexisting_binary.ergjeorj"));
}
#endif
345 | |
346 | TEST(SanitizerCommon, StripPathPrefix) { |
347 | EXPECT_EQ(0, StripPathPrefix(0, "prefix" )); |
348 | EXPECT_STREQ("foo" , StripPathPrefix("foo" , 0)); |
349 | EXPECT_STREQ("dir/file.cc" , |
350 | StripPathPrefix("/usr/lib/dir/file.cc" , "/usr/lib/" )); |
351 | EXPECT_STREQ("/file.cc" , StripPathPrefix("/usr/myroot/file.cc" , "/myroot" )); |
352 | EXPECT_STREQ("file.h" , StripPathPrefix("/usr/lib/./file.h" , "/usr/lib/" )); |
353 | } |
354 | |
355 | TEST(SanitizerCommon, RemoveANSIEscapeSequencesFromString) { |
356 | RemoveANSIEscapeSequencesFromString(nullptr); |
357 | const char *buffs[22] = { |
358 | "Default" , "Default" , |
359 | "\033[95mLight magenta" , "Light magenta" , |
360 | "\033[30mBlack\033[32mGreen\033[90mGray" , "BlackGreenGray" , |
361 | "\033[106mLight cyan \033[107mWhite " , "Light cyan White " , |
362 | "\033[31mHello\033[0m World" , "Hello World" , |
363 | "\033[38;5;82mHello \033[38;5;198mWorld" , "Hello World" , |
364 | "123[653456789012" , "123[653456789012" , |
365 | "Normal \033[5mBlink \033[25mNormal" , "Normal Blink Normal" , |
366 | "\033[106m\033[107m" , "" , |
367 | "" , "" , |
368 | " " , " " , |
369 | }; |
370 | |
371 | for (size_t i = 0; i < ARRAY_SIZE(buffs); i+=2) { |
372 | char *buffer_copy = internal_strdup(buffs[i]); |
373 | RemoveANSIEscapeSequencesFromString(buffer_copy); |
374 | EXPECT_STREQ(buffer_copy, buffs[i+1]); |
375 | InternalFree(buffer_copy); |
376 | } |
377 | } |
378 | |
379 | TEST(SanitizerCommon, InternalScopedStringAppend) { |
380 | InternalScopedString str; |
381 | EXPECT_EQ(0U, str.length()); |
382 | EXPECT_STREQ("" , str.data()); |
383 | |
384 | str.Append(str: "" ); |
385 | EXPECT_EQ(0U, str.length()); |
386 | EXPECT_STREQ("" , str.data()); |
387 | |
388 | str.Append(str: "foo" ); |
389 | EXPECT_EQ(3U, str.length()); |
390 | EXPECT_STREQ("foo" , str.data()); |
391 | |
392 | str.Append(str: "" ); |
393 | EXPECT_EQ(3U, str.length()); |
394 | EXPECT_STREQ("foo" , str.data()); |
395 | |
396 | str.Append(str: "123\000456" ); |
397 | EXPECT_EQ(6U, str.length()); |
398 | EXPECT_STREQ("foo123" , str.data()); |
399 | } |
400 | |
401 | TEST(SanitizerCommon, InternalScopedStringAppendF) { |
402 | InternalScopedString str; |
403 | EXPECT_EQ(0U, str.length()); |
404 | EXPECT_STREQ("" , str.data()); |
405 | |
406 | str.AppendF(format: "foo" ); |
407 | EXPECT_EQ(3U, str.length()); |
408 | EXPECT_STREQ("foo" , str.data()); |
409 | |
410 | int x = 1234; |
411 | str.AppendF(format: "%d" , x); |
412 | EXPECT_EQ(7U, str.length()); |
413 | EXPECT_STREQ("foo1234" , str.data()); |
414 | |
415 | str.AppendF(format: "%d" , x); |
416 | EXPECT_EQ(11U, str.length()); |
417 | EXPECT_STREQ("foo12341234" , str.data()); |
418 | |
419 | str.clear(); |
420 | EXPECT_EQ(0U, str.length()); |
421 | EXPECT_STREQ("" , str.data()); |
422 | } |
423 | |
424 | TEST(SanitizerCommon, InternalScopedStringLarge) { |
425 | InternalScopedString str; |
426 | std::string expected; |
427 | for (int i = 0; i < 1000; ++i) { |
428 | std::string append(i, 'a' + i % 26); |
429 | expected += append; |
430 | str.AppendF(format: "%s" , append.c_str()); |
431 | EXPECT_EQ(expected, str.data()); |
432 | } |
433 | } |
434 | |
435 | TEST(SanitizerCommon, InternalScopedStringLargeFormat) { |
436 | InternalScopedString str; |
437 | std::string expected; |
438 | for (int i = 0; i < 1000; ++i) { |
439 | std::string append(i, 'a' + i % 26); |
440 | expected += append; |
441 | str.AppendF(format: "%s" , append.c_str()); |
442 | EXPECT_EQ(expected, str.data()); |
443 | } |
444 | } |
445 | |
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_IOS
// GetRandom rejects null/zero/oversized buffers and fills valid buffers with
// data that differs between consecutive calls, in both blocking modes.
TEST(SanitizerCommon, GetRandom) {
  u8 buffer_1[32], buffer_2[32];
  for (bool blocking : { false, true }) {
    EXPECT_FALSE(GetRandom(nullptr, 32, blocking));
    EXPECT_FALSE(GetRandom(buffer_1, 0, blocking));
    EXPECT_FALSE(GetRandom(buffer_1, 512, blocking));
    EXPECT_EQ(ARRAY_SIZE(buffer_1), ARRAY_SIZE(buffer_2));
    for (uptr size = 4; size <= ARRAY_SIZE(buffer_1); size += 4) {
      for (uptr i = 0; i < 100; i++) {
        EXPECT_TRUE(GetRandom(buffer_1, size, blocking));
        EXPECT_TRUE(GetRandom(buffer_2, size, blocking));
        EXPECT_NE(internal_memcmp(buffer_1, buffer_2, size), 0);
      }
    }
  }
}
#endif
464 | |
465 | TEST(SanitizerCommon, ReservedAddressRangeInit) { |
466 | uptr init_size = 0xffff; |
467 | ReservedAddressRange address_range; |
468 | uptr res = address_range.Init(size: init_size); |
469 | CHECK_NE(res, (void*)-1); |
470 | UnmapOrDie(addr: (void*)res, size: init_size); |
471 | // Should be able to map into the same space now. |
472 | ReservedAddressRange address_range2; |
473 | uptr res2 = address_range2.Init(size: init_size, name: nullptr, fixed_addr: res); |
474 | CHECK_EQ(res, res2); |
475 | |
476 | // TODO(flowerhack): Once this is switched to the "real" implementation |
477 | // (rather than passing through to MmapNoAccess*), enforce and test "no |
478 | // double initializations allowed" |
479 | } |
480 | |
481 | TEST(SanitizerCommon, ReservedAddressRangeMap) { |
482 | constexpr uptr init_size = 0xffff; |
483 | ReservedAddressRange address_range; |
484 | uptr res = address_range.Init(size: init_size); |
485 | CHECK_NE(res, (void*) -1); |
486 | |
487 | // Valid mappings should succeed. |
488 | CHECK_EQ(res, address_range.Map(res, init_size)); |
489 | |
490 | // Valid mappings should be readable. |
491 | unsigned char buffer[init_size]; |
492 | memcpy(buffer, reinterpret_cast<void *>(res), init_size); |
493 | |
494 | // TODO(flowerhack): Once this is switched to the "real" implementation, make |
495 | // sure you can only mmap into offsets in the Init range. |
496 | } |
497 | |
498 | TEST(SanitizerCommon, ReservedAddressRangeUnmap) { |
499 | uptr PageSize = GetPageSizeCached(); |
500 | uptr init_size = PageSize * 8; |
501 | ReservedAddressRange address_range; |
502 | uptr base_addr = address_range.Init(size: init_size); |
503 | CHECK_NE(base_addr, (void*)-1); |
504 | CHECK_EQ(base_addr, address_range.Map(base_addr, init_size)); |
505 | |
506 | // Unmapping the entire range should succeed. |
507 | address_range.Unmap(addr: base_addr, size: init_size); |
508 | |
509 | // Map a new range. |
510 | base_addr = address_range.Init(size: init_size); |
511 | CHECK_EQ(base_addr, address_range.Map(base_addr, init_size)); |
512 | |
513 | // Windows doesn't allow partial unmappings. |
514 | #if !SANITIZER_WINDOWS |
515 | |
516 | // Unmapping at the beginning should succeed. |
517 | address_range.Unmap(addr: base_addr, size: PageSize); |
518 | |
519 | // Unmapping at the end should succeed. |
520 | uptr new_start = reinterpret_cast<uptr>(address_range.base()) + |
521 | address_range.size() - PageSize; |
522 | address_range.Unmap(addr: new_start, size: PageSize); |
523 | |
524 | #endif |
525 | |
526 | // Unmapping in the middle of the ReservedAddressRange should fail. |
527 | EXPECT_DEATH(address_range.Unmap(addr: base_addr + (PageSize * 2), size: PageSize), ".*" ); |
528 | } |
529 | |
530 | TEST(SanitizerCommon, ReadBinaryNameCached) { |
531 | char buf[256]; |
532 | EXPECT_NE((uptr)0, ReadBinaryNameCached(buf, sizeof(buf))); |
533 | } |
534 | |
}  // namespace __sanitizer