1 | //===-- sanitizer_win.cpp -------------------------------------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file is shared between AddressSanitizer and ThreadSanitizer |
10 | // run-time libraries and implements windows-specific functions from |
11 | // sanitizer_libc.h. |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "sanitizer_platform.h" |
15 | #if SANITIZER_WINDOWS |
16 | |
17 | #define WIN32_LEAN_AND_MEAN |
18 | #define NOGDI |
19 | #include <windows.h> |
20 | #include <io.h> |
21 | #include <psapi.h> |
22 | #include <stdlib.h> |
23 | |
24 | #include "sanitizer_common.h" |
25 | #include "sanitizer_file.h" |
26 | #include "sanitizer_libc.h" |
27 | #include "sanitizer_mutex.h" |
28 | #include "sanitizer_placement_new.h" |
29 | #include "sanitizer_win_defs.h" |
30 | |
31 | #if defined(PSAPI_VERSION) && PSAPI_VERSION == 1 |
32 | #pragma comment(lib, "psapi") |
33 | #endif |
34 | #if SANITIZER_WIN_TRACE |
35 | #include <traceloggingprovider.h> |
36 | // Windows trace logging provider init |
37 | #pragma comment(lib, "advapi32.lib") |
38 | TRACELOGGING_DECLARE_PROVIDER(g_asan_provider); |
39 | // GUID must be the same in utils/AddressSanitizerLoggingProvider.wprp |
40 | TRACELOGGING_DEFINE_PROVIDER(g_asan_provider, "AddressSanitizerLoggingProvider" , |
41 | (0x6c6c766d, 0x3846, 0x4e6a, 0xa4, 0xfb, 0x5b, |
42 | 0x53, 0x0b, 0xd0, 0xf3, 0xfa)); |
43 | #else |
44 | #define TraceLoggingUnregister(x) |
45 | #endif |
46 | |
47 | // For WaitOnAddress |
48 | # pragma comment(lib, "synchronization.lib") |
49 | |
50 | // A macro to tell the compiler that this part of the code cannot be reached, |
51 | // if the compiler supports this feature. Since we're using this in |
52 | // code that is called when terminating the process, the expansion of the |
53 | // macro should not terminate the process to avoid infinite recursion. |
54 | #if defined(__clang__) |
55 | # define BUILTIN_UNREACHABLE() __builtin_unreachable() |
56 | #elif defined(__GNUC__) && \ |
57 | (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)) |
58 | # define BUILTIN_UNREACHABLE() __builtin_unreachable() |
59 | #elif defined(_MSC_VER) |
60 | # define BUILTIN_UNREACHABLE() __assume(0) |
61 | #else |
62 | # define BUILTIN_UNREACHABLE() |
63 | #endif |
64 | |
65 | namespace __sanitizer { |
66 | |
67 | #include "sanitizer_syscall_generic.inc" |
68 | |
69 | // --------------------- sanitizer_common.h |
70 | uptr GetPageSize() { |
71 | SYSTEM_INFO si; |
72 | GetSystemInfo(&si); |
73 | return si.dwPageSize; |
74 | } |
75 | |
76 | uptr GetMmapGranularity() { |
77 | SYSTEM_INFO si; |
78 | GetSystemInfo(&si); |
79 | return si.dwAllocationGranularity; |
80 | } |
81 | |
82 | uptr GetMaxUserVirtualAddress() { |
83 | SYSTEM_INFO si; |
84 | GetSystemInfo(&si); |
85 | return (uptr)si.lpMaximumApplicationAddress; |
86 | } |
87 | |
88 | uptr GetMaxVirtualAddress() { |
89 | return GetMaxUserVirtualAddress(); |
90 | } |
91 | |
92 | bool FileExists(const char *filename) { |
93 | return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES; |
94 | } |
95 | |
96 | bool DirExists(const char *path) { |
97 | auto attr = ::GetFileAttributesA(path); |
98 | return (attr != INVALID_FILE_ATTRIBUTES) && (attr & FILE_ATTRIBUTE_DIRECTORY); |
99 | } |
100 | |
101 | uptr internal_getpid() { |
102 | return GetProcessId(GetCurrentProcess()); |
103 | } |
104 | |
// dlinfo() has no Windows counterpart; deliberately unimplemented.
int internal_dlinfo(void *handle, int request, void *p) {
  UNIMPLEMENTED();
}
108 | |
// In contrast to POSIX, on Windows GetCurrentThreadId()
// returns a system-unique identifier.
tid_t GetTid() {
  return GetCurrentThreadId();
}

// The thread "self" value and the tid are the same on this platform.
uptr GetThreadSelf() {
  return GetTid();
}
118 | |
#if !SANITIZER_GO
// Computes the bounds of the current thread's stack by querying the memory
// region that contains a stack-resident local variable.
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  MEMORY_BASIC_INFORMATION mbi;
  CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
  // FIXME: is it possible for the stack to not be a single allocation?
  // Are these values what ASan expects to get (reserved, not committed;
  // including stack guard page) ?
  // Top = end of the committed region holding the local; bottom = base of
  // the whole stack allocation.
  *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
  *stack_bottom = (uptr)mbi.AllocationBase;
}
#endif  // #if !SANITIZER_GO
133 | |
// Whether |err| indicates an out-of-memory condition. Currently always
// false on Windows.
bool ErrorIsOOM(error_t err) {
  // TODO: This should check which `err`s correspond to OOM.
  return false;
}
138 | |
139 | void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) { |
140 | void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); |
141 | if (rv == 0) |
142 | ReportMmapFailureAndDie(size, mem_type, "allocate" , |
143 | GetLastError(), raw_report); |
144 | return rv; |
145 | } |
146 | |
147 | void UnmapOrDie(void *addr, uptr size, bool raw_report) { |
148 | if (!size || !addr) |
149 | return; |
150 | |
151 | MEMORY_BASIC_INFORMATION mbi; |
152 | CHECK(VirtualQuery(addr, &mbi, sizeof(mbi))); |
153 | |
154 | // MEM_RELEASE can only be used to unmap whole regions previously mapped with |
155 | // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that |
156 | // fails try MEM_DECOMMIT. |
157 | if (VirtualFree(addr, 0, MEM_RELEASE) == 0) { |
158 | if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) { |
159 | ReportMunmapFailureAndDie(addr, size, GetLastError(), raw_report); |
160 | } |
161 | } |
162 | } |
163 | |
164 | static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type, |
165 | const char *mmap_type) { |
166 | error_t last_error = GetLastError(); |
167 | |
168 | // Assumption: VirtualAlloc is the last system call that was invoked before |
169 | // this method. |
170 | // VirtualAlloc emits one of 3 error codes when running out of memory |
171 | // 1. ERROR_NOT_ENOUGH_MEMORY: |
172 | // There's not enough memory to execute the command |
173 | // 2. ERROR_INVALID_PARAMETER: |
174 | // VirtualAlloc will return this if the request would allocate memory at an |
175 | // address exceeding or being very close to the maximum application address |
176 | // (the `lpMaximumApplicationAddress` field within the `SystemInfo` struct). |
177 | // This does not seem to be officially documented, but is corroborated here: |
178 | // https://stackoverflow.com/questions/45833674/why-does-virtualalloc-fail-for-lpaddress-greater-than-0x6ffffffffff |
179 | // 3. ERROR_COMMITMENT_LIMIT: |
180 | // VirtualAlloc will return this if e.g. the pagefile is too small to commit |
181 | // the requested amount of memory. |
182 | if (last_error == ERROR_NOT_ENOUGH_MEMORY || |
183 | last_error == ERROR_INVALID_PARAMETER || |
184 | last_error == ERROR_COMMITMENT_LIMIT) |
185 | return nullptr; |
186 | ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error); |
187 | } |
188 | |
189 | void *MmapOrDieOnFatalError(uptr size, const char *mem_type) { |
190 | void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); |
191 | if (rv == 0) |
192 | return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate" ); |
193 | return rv; |
194 | } |
195 | |
// We want to map a chunk of address space aligned to 'alignment'.
// Returns nullptr on OOM-like failures; dies on any other failure.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  // Windows will align our allocations to at least 64K.
  alignment = Max(alignment, GetMmapGranularity());

  uptr mapped_addr =
      (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (!mapped_addr)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned" );

  // If we got it right on the first try, return. Otherwise, unmap it and go to
  // the slow path.
  if (IsAligned(mapped_addr, alignment))
    return (void*)mapped_addr;
  if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
    ReportMmapFailureAndDie(size, mem_type, "deallocate" , GetLastError());

  // If we didn't get an aligned address, overallocate, find an aligned address,
  // unmap, and try to allocate at that aligned address.
  int retries = 0;
  const int kMaxRetries = 10;
  for (; retries < kMaxRetries &&
         (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));
       retries++) {
    // Overallocate size + alignment bytes.
    mapped_addr =
        (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
    if (!mapped_addr)
      return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned" );

    // Find the aligned address.
    uptr aligned_addr = RoundUpTo(mapped_addr, alignment);

    // Free the overallocation. Note that another thread may grab the range
    // between this free and the retry below; the loop handles that.
    if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
      ReportMmapFailureAndDie(size, mem_type, "deallocate" , GetLastError());

    // Attempt to allocate exactly the number of bytes we need at the aligned
    // address. This may fail for a number of reasons, in which case we continue
    // the loop.
    mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,
                                     MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  }

  // Fail if we can't make this work quickly.
  if (retries == kMaxRetries && mapped_addr == 0)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned" );

  return (void *)mapped_addr;
}
250 | |
// ZeroMmapFixedRegion zero's out a region of memory previously returned from a
// call to one of the MmapFixed* helpers. On non-windows systems this would be
// done with another mmap, but on windows remapping is not an option.
// VirtualFree(DECOMMIT)+VirtualAlloc(RECOMMIT) would also be a way to zero the
// memory, but we can't do this atomically, so instead we fall back to using
// internal_memset. Always returns true.
bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) {
  internal_memset((void*) fixed_addr, 0, size);
  return true;
}
261 | |
// Maps |size| bytes at |fixed_addr|; on 64-bit ASan builds the pages are
// only reserved (committed on demand, see below). Returns false and logs on
// failure instead of dying.
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
  // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
  // but on Win64 it does.
  (void)name; // unsupported
#if !SANITIZER_GO && SANITIZER_WINDOWS64
  // On asan/Windows64, using MEM_COMMIT would result in error
  // 1455:ERROR_COMMITMENT_LIMIT.
  // Asan uses an exception handler to commit pages on demand.
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);
#else
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
                         PAGE_READWRITE);
#endif
  if (p == 0) {
    Report("ERROR: %s failed to "
           "allocate %p (%zd) bytes at %p (error code: %d)\n" ,
           SanitizerToolName, size, size, fixed_addr, GetLastError());
    return false;
  }
  return true;
}
283 | |
// Large ("super") pages are not used here; forwards to MmapFixedNoReserve.
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size, const char *name) {
  // FIXME: Windows supports large pages too. Might be worth checking.
  return MmapFixedNoReserve(fixed_addr, size, name);
}
288 | |
289 | // Memory space mapped by 'MmapFixedOrDie' must have been reserved by |
290 | // 'MmapFixedNoAccess'. |
291 | void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) { |
292 | void *p = VirtualAlloc((LPVOID)fixed_addr, size, |
293 | MEM_COMMIT, PAGE_READWRITE); |
294 | if (p == 0) { |
295 | char mem_type[30]; |
296 | internal_snprintf(mem_type, sizeof(mem_type), "memory at address %p" , |
297 | (void *)fixed_addr); |
298 | ReportMmapFailureAndDie(size, mem_type, "allocate" , GetLastError()); |
299 | } |
300 | return p; |
301 | } |
302 | |
// Uses fixed_addr for now.
// Will use offset instead once we've implemented this function for real.
// Non-fatal variant: returns 0 on OOM-like failures.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size, const char *name) {
  return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
}

// Fatal variant of Map(): dies instead of returning 0.
uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size,
                                    const char *name) {
  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
}
313 | |
314 | void ReservedAddressRange::Unmap(uptr addr, uptr size) { |
315 | // Only unmap if it covers the entire range. |
316 | CHECK((addr == reinterpret_cast<uptr>(base_)) && (size == size_)); |
317 | // We unmap the whole range, just null out the base. |
318 | base_ = nullptr; |
319 | size_ = 0; |
320 | UnmapOrDie(reinterpret_cast<void*>(addr), size); |
321 | } |
322 | |
323 | void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) { |
324 | void *p = VirtualAlloc((LPVOID)fixed_addr, size, |
325 | MEM_COMMIT, PAGE_READWRITE); |
326 | if (p == 0) { |
327 | char mem_type[30]; |
328 | internal_snprintf(mem_type, sizeof(mem_type), "memory at address %p" , |
329 | (void *)fixed_addr); |
330 | return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate" ); |
331 | } |
332 | return p; |
333 | } |
334 | |
// Allocates |size| bytes anywhere; dies on failure.
void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  // FIXME: make this really NoReserve?
  return MmapOrDie(size, mem_type);
}
339 | |
340 | uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) { |
341 | base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size) : MmapNoAccess(size); |
342 | size_ = size; |
343 | name_ = name; |
344 | (void)os_handle_; // unsupported |
345 | return reinterpret_cast<uptr>(base_); |
346 | } |
347 | |
348 | |
349 | void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) { |
350 | (void)name; // unsupported |
351 | void *res = VirtualAlloc((LPVOID)fixed_addr, size, |
352 | MEM_RESERVE, PAGE_NOACCESS); |
353 | if (res == 0) |
354 | Report("WARNING: %s failed to " |
355 | "mprotect %p (%zd) bytes at %p (error code: %d)\n" , |
356 | SanitizerToolName, size, size, fixed_addr, GetLastError()); |
357 | return res; |
358 | } |
359 | |
360 | void *MmapNoAccess(uptr size) { |
361 | void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS); |
362 | if (res == 0) |
363 | Report("WARNING: %s failed to " |
364 | "mprotect %p (%zd) bytes (error code: %d)\n" , |
365 | SanitizerToolName, size, size, GetLastError()); |
366 | return res; |
367 | } |
368 | |
369 | bool MprotectNoAccess(uptr addr, uptr size) { |
370 | DWORD old_protection; |
371 | return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection); |
372 | } |
373 | |
374 | bool MprotectReadOnly(uptr addr, uptr size) { |
375 | DWORD old_protection; |
376 | return VirtualProtect((LPVOID)addr, size, PAGE_READONLY, &old_protection); |
377 | } |
378 | |
379 | bool MprotectReadWrite(uptr addr, uptr size) { |
380 | DWORD old_protection; |
381 | return VirtualProtect((LPVOID)addr, size, PAGE_READWRITE, &old_protection); |
382 | } |
383 | |
384 | void ReleaseMemoryPagesToOS(uptr beg, uptr end) { |
385 | uptr beg_aligned = RoundDownTo(beg, GetPageSizeCached()), |
386 | end_aligned = RoundDownTo(end, GetPageSizeCached()); |
387 | CHECK(beg < end); // make sure the region is sane |
388 | if (beg_aligned == end_aligned) // make sure we're freeing at least 1 page; |
389 | return; |
390 | UnmapOrDie((void *)beg, end_aligned - beg_aligned); |
391 | } |
392 | |
// No-op: huge-page mode for the shadow is not configured on Windows.
void SetShadowRegionHugePageMode(uptr addr, uptr size) {
  // FIXME: probably similar to ReleaseMemoryToOS.
}

// No-op that reports success: core-dump exclusion is not implemented.
bool DontDumpShadowMemory(uptr addr, uptr length) {
  // This is almost useless on 32-bits.
  // FIXME: add madvise-analog when we move to 64-bits.
  return true;
}
402 | |
// Picks a base address for the dynamic shadow region. The result is aligned
// to max(granularity << shadow_scale, 1 << min_shadow_base_alignment); the
// process CHECK-fails if no suitable gap exists. |high_mem_end| is unused
// on Windows.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment, UNUSED uptr &high_mem_end,
                      uptr granularity) {
  const uptr alignment =
      Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
  const uptr left_padding =
      Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
  uptr space_size = shadow_size_bytes + left_padding;
  uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
                                               granularity, nullptr, nullptr);
  CHECK_NE((uptr)0, shadow_start);
  CHECK(IsAligned(shadow_start, alignment));
  return shadow_start;
}
417 | |
// Scans the address space bottom-up for a free region that can hold |size|
// bytes at |alignment|, leaving |left_padding| below the returned address.
// Returns 0 if the scan fails.
// NOTE(review): |largest_gap_found| and |max_occupied_addr| are ignored in
// this implementation; the only caller in this file passes nullptr for both.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found,
                              uptr *max_occupied_addr) {
  uptr address = 0;
  while (true) {
    MEMORY_BASIC_INFORMATION info;
    // A failing query means we walked past the last region: give up.
    if (!::VirtualQuery((void*)address, &info, sizeof(info)))
      return 0;

    if (info.State == MEM_FREE) {
      uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding,
                                      alignment);
      // The candidate must fit entirely within this free region.
      if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize)
        return shadow_address;
    }

    // Move to the next region.
    address = (uptr)info.BaseAddress + info.RegionSize;
  }
  return 0;
}
439 | |
// HWASan-style shadow aliasing is not supported on Windows; always
// CHECK-fails.
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size) {
  CHECK(false && "HWASan aliasing is unimplemented on Windows" );
  return 0;
}
445 | |
446 | bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) { |
447 | MEMORY_BASIC_INFORMATION mbi; |
448 | CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi))); |
449 | return mbi.Protect == PAGE_NOACCESS && |
450 | (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end; |
451 | } |
452 | |
// File-to-memory mapping helpers are not implemented on Windows.
void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  UNIMPLEMENTED();
}

void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
  UNIMPLEMENTED();
}
460 | |
// Limits for the cached environment-variable table used by GetEnv() below.
static const int kMaxEnvNameLength = 128;
// 32767 presumably matches the Win32 maximum environment-value length —
// TODO confirm against the GetEnvironmentVariable documentation.
static const DWORD kMaxEnvValueLength = 32767;

namespace {

// One cached name/value pair for GetEnv().
struct EnvVariable {
  char name[kMaxEnvNameLength];
  char value[kMaxEnvValueLength];
};

} // namespace

// Fixed-capacity cache; GetEnv() CHECK-fails if more than kEnvVariables
// distinct names are ever queried.
static const int kEnvVariables = 5;
static EnvVariable env_vars[kEnvVariables];
static int num_env_vars;
476 | |
// Returns the value of environment variable |name|, or null if it is unset
// or its value does not fit in kMaxEnvValueLength. Results are cached and
// never refreshed.
const char *GetEnv(const char *name) {
  // Note: this implementation caches the values of the environment variables
  // and limits their quantity.
  for (int i = 0; i < num_env_vars; i++) {
    if (0 == internal_strcmp(name, env_vars[i].name))
      return env_vars[i].value;
  }
  CHECK_LT(num_env_vars, kEnvVariables);
  // rv is the value length (excluding the terminator) on success; 0 is
  // treated as "unset".
  DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,
                                     kMaxEnvValueLength);
  if (rv > 0 && rv < kMaxEnvValueLength) {
    // Name length was CHECKed, so the strncpy result is null-terminated.
    CHECK_LT(internal_strlen(name), kMaxEnvNameLength);
    internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);
    num_env_vars++;
    return env_vars[num_env_vars - 1].value;
  }
  return 0;
}
495 | |
// Current working directory lookup is not implemented on Windows.
const char *GetPwd() {
  UNIMPLEMENTED();
}

// Windows has no POSIX-style numeric uid; deliberately unimplemented.
u32 GetUid() {
  UNIMPLEMENTED();
}
503 | |
504 | namespace { |
505 | struct ModuleInfo { |
506 | const char *filepath; |
507 | uptr base_address; |
508 | uptr end_address; |
509 | }; |
510 | |
511 | #if !SANITIZER_GO |
512 | int CompareModulesBase(const void *pl, const void *pr) { |
513 | const ModuleInfo *l = (const ModuleInfo *)pl, *r = (const ModuleInfo *)pr; |
514 | if (l->base_address < r->base_address) |
515 | return -1; |
516 | return l->base_address > r->base_address; |
517 | } |
518 | #endif |
519 | } // namespace |
520 | |
#if !SANITIZER_GO
// Prints one line per loaded module (address range and path), sorted by
// base address, to the sanitizer report stream.
void DumpProcessMap() {
  Report("Dumping process modules:\n" );
  ListOfModules modules;
  modules.init();
  uptr num_modules = modules.size();

  // Flatten to ModuleInfo records so we can qsort them by base address.
  InternalMmapVector<ModuleInfo> module_infos(num_modules);
  for (size_t i = 0; i < num_modules; ++i) {
    module_infos[i].filepath = modules[i].full_name();
    module_infos[i].base_address = modules[i].ranges().front()->beg;
    module_infos[i].end_address = modules[i].ranges().back()->end;
  }
  qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
        CompareModulesBase);

  for (size_t i = 0; i < num_modules; ++i) {
    const ModuleInfo &mi = module_infos[i];
    if (mi.end_address != 0) {
      Printf("\t%p-%p %s\n" , mi.base_address, mi.end_address,
             mi.filepath[0] ? mi.filepath : "[no name]" );
    } else if (mi.filepath[0]) {
      // Known path, unknown address range.
      Printf("\t??\?-??? %s\n" , mi.filepath);
    } else {
      Printf("\t???\n" );
    }
  }
}
#endif
550 | |
// Core-dump control is a POSIX concept; nothing to do here.
void DisableCoreDumperIfNecessary() {
  // Do nothing.
}

// exec()-style self re-execution does not exist on Windows.
void ReExec() {
  UNIMPLEMENTED();
}

void PlatformPrepareForSandboxing(void *args) {}

// Stack-size and address-space limits are POSIX rlimit concepts; the
// queries and setters below are unimplemented on Windows.
bool StackSizeIsUnlimited() {
  UNIMPLEMENTED();
}

void SetStackSizeLimitInBytes(uptr limit) {
  UNIMPLEMENTED();
}

bool AddressSpaceIsUnlimited() {
  UNIMPLEMENTED();
}

void SetAddressSpaceUnlimited() {
  UNIMPLEMENTED();
}
576 | |
// Windows accepts both the backslash and the forward slash as separators.
bool IsPathSeparator(const char c) {
  switch (c) {
    case '\\':
    case '/':
      return true;
    default:
      return false;
  }
}
580 | |
// True for ASCII letters. Assumes ToLower() (from sanitizer_common) maps
// 'A'-'Z' to 'a'-'z' — TODO confirm against its definition.
static bool IsAlpha(char c) {
  c = ToLower(c);
  return c >= 'a' && c <= 'z';
}
585 | |
586 | bool IsAbsolutePath(const char *path) { |
587 | return path != nullptr && IsAlpha(path[0]) && path[1] == ':' && |
588 | IsPathSeparator(path[2]); |
589 | } |
590 | |
// Sleep() takes milliseconds, so sub-millisecond requests truncate to
// Sleep(0).
void internal_usleep(u64 useconds) { Sleep(useconds / 1000); }
592 | |
593 | u64 NanoTime() { |
594 | static LARGE_INTEGER frequency = {}; |
595 | LARGE_INTEGER counter; |
596 | if (UNLIKELY(frequency.QuadPart == 0)) { |
597 | QueryPerformanceFrequency(&frequency); |
598 | CHECK_NE(frequency.QuadPart, 0); |
599 | } |
600 | QueryPerformanceCounter(&counter); |
601 | counter.QuadPart *= 1000ULL * 1000000ULL; |
602 | counter.QuadPart /= frequency.QuadPart; |
603 | return counter.QuadPart; |
604 | } |
605 | |
// The QPC-based NanoTime() is used for the monotonic clock as well.
u64 MonotonicNanoTime() { return NanoTime(); }

// Terminates the process immediately with exit code 3.
void Abort() {
  internal__exit(3);
}
611 | |
612 | bool CreateDir(const char *pathname) { |
613 | return CreateDirectoryA(pathname, nullptr) != 0; |
614 | } |
615 | |
#if !SANITIZER_GO
// Read the file to extract the ImageBase field from the PE header. If ASLR is
// disabled and this virtual address is available, the loader will typically
// load the image at this address. Therefore, we call it the preferred base. Any
// addresses in the DWARF typically assume that the object has been loaded at
// this address.
// |buf| receives the "PE\0\0" signature plus the file and optional headers
// (the caller sizes it accordingly). Returns 0 on any open/read/parse
// failure.
static uptr GetPreferredBase(const char *modname, char *buf, size_t buf_size) {
  fd_t fd = OpenFile(modname, RdOnly, nullptr);
  if (fd == kInvalidFd)
    return 0;
  FileCloser closer(fd);

  // Read just the DOS header.
  IMAGE_DOS_HEADER dos_header;
  uptr bytes_read;
  if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) ||
      bytes_read != sizeof(dos_header))
    return 0;

  // The file should start with the right signature.
  if (dos_header.e_magic != IMAGE_DOS_SIGNATURE)
    return 0;

  // The layout at e_lfanew is:
  // "PE\0\0"
  // IMAGE_FILE_HEADER
  // IMAGE_OPTIONAL_HEADER
  // Seek to e_lfanew and read all that data.
  if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==
      INVALID_SET_FILE_POINTER)
    return 0;
  if (!ReadFromFile(fd, buf, buf_size, &bytes_read) || bytes_read != buf_size)
    return 0;

  // Check for "PE\0\0" before the PE header.
  char *pe_sig = &buf[0];
  if (internal_memcmp(pe_sig, "PE\0\0" , 4) != 0)
    return 0;

  // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted.
  IMAGE_OPTIONAL_HEADER *pe_header =
      (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER));

  // Check for more magic in the PE header.
  if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)
    return 0;

  // Finally, return the ImageBase.
  return (uptr)pe_header->ImageBase;
}
666 | |
// Enumerates the modules loaded in the current process and records each
// one's UTF-8 path, adjusted base and address range.
void ListOfModules::init() {
  clearOrInit();
  HANDLE cur_process = GetCurrentProcess();

  // Query the list of modules. Start by assuming there are no more than 256
  // modules and retry if that's not sufficient.
  HMODULE *hmodules = 0;
  uptr modules_buffer_size = sizeof(HMODULE) * 256;
  DWORD bytes_required;
  while (!hmodules) {
    hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__);
    CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size,
                             &bytes_required));
    if (bytes_required > modules_buffer_size) {
      // Either there turned out to be more than 256 hmodules, or new hmodules
      // could have loaded since the last try. Retry.
      UnmapOrDie(hmodules, modules_buffer_size);
      hmodules = 0;
      modules_buffer_size = bytes_required;
    }
  }

  // Scratch buffers reused for every module: PE header bytes, the UTF-16
  // path, and its UTF-8 conversion.
  InternalMmapVector<char> buf(4 + sizeof(IMAGE_FILE_HEADER) +
                               sizeof(IMAGE_OPTIONAL_HEADER));
  InternalMmapVector<wchar_t> modname_utf16(kMaxPathLength);
  InternalMmapVector<char> module_name(kMaxPathLength);
  // |num_modules| is the number of modules actually present,
  size_t num_modules = bytes_required / sizeof(HMODULE);
  for (size_t i = 0; i < num_modules; ++i) {
    HMODULE handle = hmodules[i];
    MODULEINFO mi;
    if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))
      continue;

    // Get the UTF-16 path and convert to UTF-8.
    int modname_utf16_len =
        GetModuleFileNameW(handle, &modname_utf16[0], kMaxPathLength);
    if (modname_utf16_len == 0)
      modname_utf16[0] = '\0';
    int module_name_len = ::WideCharToMultiByte(
        CP_UTF8, 0, &modname_utf16[0], modname_utf16_len + 1, &module_name[0],
        kMaxPathLength, NULL, NULL);
    module_name[module_name_len] = '\0';

    uptr base_address = (uptr)mi.lpBaseOfDll;
    uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;

    // Adjust the base address of the module so that we get a VA instead of an
    // RVA when computing the module offset. This helps llvm-symbolizer find the
    // right DWARF CU. In the common case that the image is loaded at its
    // preferred address, we will now print normal virtual addresses.
    uptr preferred_base =
        GetPreferredBase(&module_name[0], &buf[0], buf.size());
    uptr adjusted_base = base_address - preferred_base;

    modules_.push_back(LoadedModule());
    LoadedModule &cur_module = modules_.back();
    cur_module.set(&module_name[0], adjusted_base);
    // We add the whole module as one single address range.
    cur_module.addAddressRange(base_address, end_address, /*executable*/ true,
                               /*writable*/ true);
  }
  UnmapOrDie(hmodules, modules_buffer_size);
}
731 | |
// When init() cannot be used, fall back to an empty module list.
void ListOfModules::fallbackInit() { clear(); }
733 | |
// We can't use atexit() directly at __asan_init time as the CRT is not fully
// initialized at this point. Place the functions into a vector and use
// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;

// Pre-CRT behavior of Atexit(): just remember the callback for later.
static int queueAtexit(void (*function)(void)) {
  atexit_functions.push_back(function);
  return 0;
}
743 | |
744 | // If Atexit() is being called after RunAtexit() has already been run, it needs |
// to be able to call atexit() directly. Here we use a function pointer to
746 | // switch out its behaviour. |
747 | // An example of where this is needed is the asan_dynamic runtime on MinGW-w64. |
748 | // On this environment, __asan_init is called during global constructor phase, |
749 | // way after calling the .CRT$XID initializer. |
// Starts out queueing; RunAtexit() switches this to the real atexit().
static int (*volatile queueOrCallAtExit)(void (*)(void)) = &queueAtexit;

// Registers a process-exit callback (queued until the CRT is usable).
int Atexit(void (*function)(void)) { return queueOrCallAtExit(function); }
753 | |
754 | static int RunAtexit() { |
755 | TraceLoggingUnregister(g_asan_provider); |
756 | queueOrCallAtExit = &atexit; |
757 | int ret = 0; |
758 | for (uptr i = 0; i < atexit_functions.size(); ++i) { |
759 | ret |= atexit(atexit_functions[i]); |
760 | } |
761 | return ret; |
762 | } |
763 | |
// Register RunAtexit to run during CRT startup (the .CRT$XID group).
#pragma section(".CRT$XID", long, read)
__declspec(allocate(".CRT$XID" )) int (*__run_atexit)() = RunAtexit;
#endif
767 | |
768 | // ------------------ sanitizer_libc.h |
// Opens |filename| for reading or (re)writing. Returns kInvalidFd on
// failure and, if |last_error| is non-null, stores GetLastError() there.
fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {
  // FIXME: Use the wide variants to handle Unicode filenames.
  fd_t res;
  if (mode == RdOnly) {
    res = CreateFileA(filename, GENERIC_READ,
                      FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                      nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
  } else if (mode == WrOnly) {
    // CREATE_ALWAYS truncates/replaces any existing file.
    res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
                      FILE_ATTRIBUTE_NORMAL, nullptr);
  } else {
    UNIMPLEMENTED();
  }
  // The stdout/stderr sentinel fds must never collide with a real handle.
  CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd);
  CHECK(res != kStderrFd || kStderrFd == kInvalidFd);
  if (res == kInvalidFd && last_error)
    *last_error = GetLastError();
  return res;
}
788 | |
// On Windows an fd_t is a HANDLE, so closing means CloseHandle.
void CloseFile(fd_t fd) {
  CloseHandle(fd);
}
792 | |
793 | bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read, |
794 | error_t *error_p) { |
795 | CHECK(fd != kInvalidFd); |
796 | |
797 | // bytes_read can't be passed directly to ReadFile: |
798 | // uptr is unsigned long long on 64-bit Windows. |
799 | unsigned long num_read_long; |
800 | |
801 | bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr); |
802 | if (!success && error_p) |
803 | *error_p = GetLastError(); |
804 | if (bytes_read) |
805 | *bytes_read = num_read_long; |
806 | return success; |
807 | } |
808 | |
// Console color support is not implemented; always reports false.
bool SupportsColoredOutput(fd_t fd) {
  // FIXME: support colored output.
  return false;
}
813 | |
// Writes |buff_size| bytes to |fd|, translating the Unix-style sentinel fds
// for stdout/stderr into real console handles. Returns false (with
// *error_p set) on failure.
bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
                 error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // Handle null optional parameters.
  error_t dummy_error;
  error_p = error_p ? error_p : &dummy_error;
  uptr dummy_bytes_written;
  bytes_written = bytes_written ? bytes_written : &dummy_bytes_written;

  // Initialize output parameters in case we fail.
  *error_p = 0;
  *bytes_written = 0;

  // Map the conventional Unix fds 1 and 2 to Windows handles. They might be
  // closed, in which case this will fail.
  if (fd == kStdoutFd || fd == kStderrFd) {
    fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
    if (fd == 0) {
      *error_p = ERROR_INVALID_HANDLE;
      return false;
    }
  }

  // WriteFile takes a DWORD count; uptr may be wider.
  DWORD bytes_written_32;
  if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) {
    *error_p = GetLastError();
    return false;
  } else {
    *bytes_written = bytes_written_32;
    return true;
  }
}
847 | |
// Yields the remainder of the current time slice; Sleep(0) is the closest
// Windows equivalent of sched_yield(). Always returns 0.
uptr internal_sched_yield() {
  Sleep(0);
  return 0;
}
852 | |
// Terminates the process immediately with the given exit code, bypassing
// normal process shutdown.
void internal__exit(int exitcode) {
  // Unregister the trace logging provider first (expands to a no-op when
  // SANITIZER_WIN_TRACE is disabled).
  TraceLoggingUnregister(g_asan_provider);
  // ExitProcess runs some finalizers, so use TerminateProcess to avoid that.
  // The debugger doesn't stop on TerminateProcess like it does on ExitProcess,
  // so add our own breakpoint here.
  if (::IsDebuggerPresent())
    __debugbreak();
  TerminateProcess(GetCurrentProcess(), exitcode);
  BUILTIN_UNREACHABLE();
}
863 | |
// Not implemented on Windows; aborts if ever called.
uptr internal_ftruncate(fd_t fd, uptr size) {
  UNIMPLEMENTED();
}
867 | |
868 | uptr GetRSS() { |
869 | PROCESS_MEMORY_COUNTERS counters; |
870 | if (!GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters))) |
871 | return 0; |
872 | return counters.WorkingSetSize; |
873 | } |
874 | |
// Internal thread creation is not supported on Windows; returns a null thread handle.
void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
// No-op: pairs with internal_start_thread, which never creates a thread here.
void internal_join_thread(void *th) { }
877 | |
// Blocks until *p differs from cmp (or a spurious wakeup occurs), using the
// WaitOnAddress futex-style API.
void FutexWait(atomic_uint32_t *p, u32 cmp) {
  // NOTE(review): the return value is ignored; as with a futex, callers are
  // expected to re-check the condition in a loop.
  WaitOnAddress(p, &cmp, sizeof(cmp), INFINITE);
}
881 | |
882 | void FutexWake(atomic_uint32_t *p, u32 count) { |
883 | if (count == 1) |
884 | WakeByAddressSingle(p); |
885 | else |
886 | WakeByAddressAll(p); |
887 | } |
888 | |
// TLS is not tracked on Windows; report a zero-sized TLS block.
uptr GetTlsSize() {
  return 0;
}
892 | |
// Reports the current thread's stack bounds; TLS bounds are not tracked on
// Windows and are always reported as empty.
void GetThreadStackAndTls(bool main, uptr *stk_begin, uptr *stk_end,
                          uptr *tls_begin, uptr *tls_end) {
# if SANITIZER_GO
  // Under SANITIZER_GO all bounds are reported as empty.
  *stk_begin = 0;
  *stk_end = 0;
  *tls_begin = 0;
  *tls_end = 0;
# else
  // Note the argument order: "top" is the high address (stk_end), "bottom"
  // the low address (stk_begin).
  GetThreadStackTopAndBottom(main, stk_end, stk_begin);
  *tls_begin = 0;
  *tls_end = 0;
# endif
}
906 | |
// Appends a chunk of report text to the report file, falling back to the
// debugger output window if the write fails.
void ReportFile::Write(const char *buffer, uptr length) {
  SpinMutexLock l(mu);
  ReopenIfNecessary();
  if (!WriteToFile(fd, buffer, length)) {
    // stderr may be closed, but we may be able to print to the debugger
    // instead. This is the case when launching a program from Visual Studio,
    // and the following routine should write to its console.
    OutputDebugStringA(buffer);
  }
}
917 | |
// No alternate signal stack concept on Windows; intentionally a no-op.
void SetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}
921 | |
// Counterpart of SetAlternateSignalStack; likewise a no-op on Windows.
void UnsetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}
925 | |
// Deadly-signal handlers are installed elsewhere on Windows (via the
// vectored exception handler machinery); this hook does nothing here.
void InstallDeadlySignalHandlers(SignalHandlerType handler) {
  (void)handler;
  // FIXME: Decide what to do on Windows.
}
930 | |
// Per-signal handling policy is not implemented on Windows; no signal is
// claimed by the runtime through this query.
HandleSignalMode GetHandleSignalMode(int signum) {
  // FIXME: Decide what to do on Windows.
  return kHandleSignalNo;
}
935 | |
936 | // Check based on flags if we should handle this exception. |
937 | bool IsHandledDeadlyException(DWORD exceptionCode) { |
938 | switch (exceptionCode) { |
939 | case EXCEPTION_ACCESS_VIOLATION: |
940 | case EXCEPTION_ARRAY_BOUNDS_EXCEEDED: |
941 | case EXCEPTION_STACK_OVERFLOW: |
942 | case EXCEPTION_DATATYPE_MISALIGNMENT: |
943 | case EXCEPTION_IN_PAGE_ERROR: |
944 | return common_flags()->handle_segv; |
945 | case EXCEPTION_ILLEGAL_INSTRUCTION: |
946 | case EXCEPTION_PRIV_INSTRUCTION: |
947 | case EXCEPTION_BREAKPOINT: |
948 | return common_flags()->handle_sigill; |
949 | case EXCEPTION_FLT_DENORMAL_OPERAND: |
950 | case EXCEPTION_FLT_DIVIDE_BY_ZERO: |
951 | case EXCEPTION_FLT_INEXACT_RESULT: |
952 | case EXCEPTION_FLT_INVALID_OPERATION: |
953 | case EXCEPTION_FLT_OVERFLOW: |
954 | case EXCEPTION_FLT_STACK_CHECK: |
955 | case EXCEPTION_FLT_UNDERFLOW: |
956 | case EXCEPTION_INT_DIVIDE_BY_ZERO: |
957 | case EXCEPTION_INT_OVERFLOW: |
958 | return common_flags()->handle_sigfpe; |
959 | } |
960 | return false; |
961 | } |
962 | |
963 | bool IsAccessibleMemoryRange(uptr beg, uptr size) { |
964 | SYSTEM_INFO si; |
965 | GetNativeSystemInfo(&si); |
966 | uptr page_size = si.dwPageSize; |
967 | uptr page_mask = ~(page_size - 1); |
968 | |
969 | for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask; |
970 | page <= end;) { |
971 | MEMORY_BASIC_INFORMATION info; |
972 | if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info)) |
973 | return false; |
974 | |
975 | if (info.Protect == 0 || info.Protect == PAGE_NOACCESS || |
976 | info.Protect == PAGE_EXECUTE) |
977 | return false; |
978 | |
979 | if (info.RegionSize == 0) |
980 | return false; |
981 | |
982 | page += info.RegionSize; |
983 | } |
984 | |
985 | return true; |
986 | } |
987 | |
// Best-effort memcpy that must not fault. Not implemented on Windows yet,
// so it conservatively reports failure without copying anything.
bool TryMemCpy(void *dest, const void *src, uptr n) {
  // TODO: implement.
  return false;
}
992 | |
993 | bool SignalContext::IsStackOverflow() const { |
994 | return (DWORD)GetType() == EXCEPTION_STACK_OVERFLOW; |
995 | } |
996 | |
// Extracts pc/sp/bp from the captured exception record and CPU context,
// selecting the architecture-specific register fields at compile time.
void SignalContext::InitPcSpBp() {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
  CONTEXT *context_record = (CONTEXT *)context;

  pc = (uptr)exception_record->ExceptionAddress;
# if SANITIZER_WINDOWS64
# if SANITIZER_ARM64
  // AArch64: CONTEXT exposes dedicated Fp and Sp fields.
  bp = (uptr)context_record->Fp;
  sp = (uptr)context_record->Sp;
# else
  // x86-64.
  bp = (uptr)context_record->Rbp;
  sp = (uptr)context_record->Rsp;
# endif
# else
# if SANITIZER_ARM
  // 32-bit ARM: R11 is used as the frame pointer here.
  bp = (uptr)context_record->R11;
  sp = (uptr)context_record->Sp;
# else
  // 32-bit x86.
  bp = (uptr)context_record->Ebp;
  sp = (uptr)context_record->Esp;
# endif
# endif
}
1020 | |
1021 | uptr SignalContext::GetAddress() const { |
1022 | EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo; |
1023 | if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) |
1024 | return exception_record->ExceptionInformation[1]; |
1025 | return (uptr)exception_record->ExceptionAddress; |
1026 | } |
1027 | |
1028 | bool SignalContext::IsMemoryAccess() const { |
1029 | return ((EXCEPTION_RECORD *)siginfo)->ExceptionCode == |
1030 | EXCEPTION_ACCESS_VIOLATION; |
1031 | } |
1032 | |
// The address reported by GetAddress() comes straight from the exception
// record, so it is always the true faulting address here.
bool SignalContext::IsTrueFaultingAddress() const { return true; }
1034 | |
1035 | SignalContext::WriteFlag SignalContext::GetWriteFlag() const { |
1036 | EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo; |
1037 | |
1038 | // The write flag is only available for access violation exceptions. |
1039 | if (exception_record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION) |
1040 | return SignalContext::Unknown; |
1041 | |
1042 | // The contents of this array are documented at |
1043 | // https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record |
1044 | // The first element indicates read as 0, write as 1, or execute as 8. The |
1045 | // second element is the faulting address. |
1046 | switch (exception_record->ExceptionInformation[0]) { |
1047 | case 0: |
1048 | return SignalContext::Read; |
1049 | case 1: |
1050 | return SignalContext::Write; |
1051 | case 8: |
1052 | return SignalContext::Unknown; |
1053 | } |
1054 | return SignalContext::Unknown; |
1055 | } |
1056 | |
// Prints the general-purpose registers from the captured CONTEXT, four per
// output row, for the architectures we know how to decode.
void SignalContext::DumpAllRegisters(void *context) {
  CONTEXT *ctx = (CONTEXT *)context;
# if defined(_M_X64)
  // x86-64.
  Report("Register values:\n" );
  Printf("rax = %llx " , ctx->Rax);
  Printf("rbx = %llx " , ctx->Rbx);
  Printf("rcx = %llx " , ctx->Rcx);
  Printf("rdx = %llx " , ctx->Rdx);
  Printf("\n" );
  Printf("rdi = %llx " , ctx->Rdi);
  Printf("rsi = %llx " , ctx->Rsi);
  Printf("rbp = %llx " , ctx->Rbp);
  Printf("rsp = %llx " , ctx->Rsp);
  Printf("\n" );
  Printf("r8 = %llx " , ctx->R8);
  Printf("r9 = %llx " , ctx->R9);
  Printf("r10 = %llx " , ctx->R10);
  Printf("r11 = %llx " , ctx->R11);
  Printf("\n" );
  Printf("r12 = %llx " , ctx->R12);
  Printf("r13 = %llx " , ctx->R13);
  Printf("r14 = %llx " , ctx->R14);
  Printf("r15 = %llx " , ctx->R15);
  Printf("\n" );
# elif defined(_M_IX86)
  // 32-bit x86.
  Report("Register values:\n" );
  Printf("eax = %lx " , ctx->Eax);
  Printf("ebx = %lx " , ctx->Ebx);
  Printf("ecx = %lx " , ctx->Ecx);
  Printf("edx = %lx " , ctx->Edx);
  Printf("\n" );
  Printf("edi = %lx " , ctx->Edi);
  Printf("esi = %lx " , ctx->Esi);
  Printf("ebp = %lx " , ctx->Ebp);
  Printf("esp = %lx " , ctx->Esp);
  Printf("\n" );
# elif defined(_M_ARM64)
  // AArch64: x0..x30, padded so single-digit names align with double-digit
  // ones; a newline after every fourth register.
  Report("Register values:\n" );
  for (int i = 0; i <= 30; i++) {
    Printf("x%d%s = %llx" , i < 10 ? " " : "" , ctx->X[i]);
    if (i % 4 == 3)
      Printf("\n" );
  }
# else
  // TODO
  (void)ctx;
# endif
}
1105 | |
1106 | int SignalContext::GetType() const { |
1107 | return static_cast<const EXCEPTION_RECORD *>(siginfo)->ExceptionCode; |
1108 | } |
1109 | |
// Maps the exception code to a short human-readable name used in reports;
// codes outside the known deadly set get a generic description.
const char *SignalContext::Describe() const {
  unsigned code = GetType();
  // Get the string description of the exception if this is a known deadly
  // exception.
  switch (code) {
    case EXCEPTION_ACCESS_VIOLATION:
      return "access-violation" ;
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
      return "array-bounds-exceeded" ;
    case EXCEPTION_STACK_OVERFLOW:
      return "stack-overflow" ;
    case EXCEPTION_DATATYPE_MISALIGNMENT:
      return "datatype-misalignment" ;
    case EXCEPTION_IN_PAGE_ERROR:
      return "in-page-error" ;
    case EXCEPTION_ILLEGAL_INSTRUCTION:
      return "illegal-instruction" ;
    case EXCEPTION_PRIV_INSTRUCTION:
      return "priv-instruction" ;
    case EXCEPTION_BREAKPOINT:
      return "breakpoint" ;
    case EXCEPTION_FLT_DENORMAL_OPERAND:
      return "flt-denormal-operand" ;
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
      return "flt-divide-by-zero" ;
    case EXCEPTION_FLT_INEXACT_RESULT:
      return "flt-inexact-result" ;
    case EXCEPTION_FLT_INVALID_OPERATION:
      return "flt-invalid-operation" ;
    case EXCEPTION_FLT_OVERFLOW:
      return "flt-overflow" ;
    case EXCEPTION_FLT_STACK_CHECK:
      return "flt-stack-check" ;
    case EXCEPTION_FLT_UNDERFLOW:
      return "flt-underflow" ;
    case EXCEPTION_INT_DIVIDE_BY_ZERO:
      return "int-divide-by-zero" ;
    case EXCEPTION_INT_OVERFLOW:
      return "int-overflow" ;
  }
  return "unknown exception" ;
}
1152 | |
// Copies the current executable's path into buf as NUL-terminated UTF-8,
// truncating to buf_len. Returns the number of bytes written, excluding the
// terminating NUL; returns 0 (with an empty buf) on failure.
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
  if (buf_len == 0)
    return 0;

  // Get the UTF-16 path and convert to UTF-8.
  InternalMmapVector<wchar_t> binname_utf16(kMaxPathLength);
  int binname_utf16_len =
      GetModuleFileNameW(NULL, &binname_utf16[0], kMaxPathLength);
  if (binname_utf16_len == 0) {
    buf[0] = '\0';
    return 0;
  }
  int binary_name_len =
      ::WideCharToMultiByte(CP_UTF8, 0, &binname_utf16[0], binname_utf16_len,
                            buf, buf_len, NULL, NULL);
  // If the converted name exactly fills the buffer, drop the last byte to
  // make room for the terminating NUL.
  if ((unsigned)binary_name_len == buf_len)
    --binary_name_len;
  buf[binary_name_len] = '\0';
  return binary_name_len;
}
1173 | |
// On Windows the "long" process name is the same as the binary name.
uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}
1177 | |
// VMA-size validation is not needed on Windows.
void CheckVMASize() {
  // Do nothing.
}
1181 | |
// No early platform-specific initialization is required on Windows.
void InitializePlatformEarly() {
  // Do nothing.
}
1185 | |
// ASLR configuration checks are not performed on Windows.
void CheckASLR() {
  // Do nothing
}
1189 | |
// mprotect-restriction checks (a BSD concern) do not apply on Windows.
void CheckMPROTECT() {
  // Do nothing
}
1193 | |
// Command-line retrieval is not implemented on Windows; callers must handle
// a null result.
char **GetArgv() {
  // FIXME: Actually implement this function.
  return 0;
}
1198 | |
// Environment-block retrieval is not implemented on Windows; callers must
// handle a null result.
char **GetEnviron() {
  // FIXME: Actually implement this function.
  return 0;
}
1203 | |
// Subprocess spawning is not implemented on Windows; always reports failure
// by returning -1.
pid_t StartSubprocess(const char *program, const char *const argv[],
                      const char *const envp[], fd_t stdin_fd, fd_t stdout_fd,
                      fd_t stderr_fd) {
  // FIXME: implement on this platform
  // Should be implemented based on
  // SymbolizerProcess::StarAtSymbolizerSubprocess
  // from lib/sanitizer_common/sanitizer_symbolizer_win.cpp.
  return -1;
}
1213 | |
// Process liveness check is not implemented on Windows; conservatively
// reports the process as not running.
bool IsProcessRunning(pid_t pid) {
  // FIXME: implement on this platform.
  return false;
}
1218 | |
// Not implemented on Windows; always reports failure.
int WaitForProcess(pid_t pid) { return -1; }
1220 | |
// FIXME implement on this platform.
// No-op: memory profiling is not collected on Windows, so cb is never invoked.
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}
1223 | |
// RTLD_DEEPBIND is a glibc dlopen concern; nothing to check on Windows.
void CheckNoDeepBind(const char *filename, int flag) {
  // Do nothing.
}
1227 | |
// FIXME: implement on this platform.
// NOTE(review): could presumably be implemented via RtlGenRandom /
// BCryptGenRandom; aborts if called for now.
bool GetRandom(void *buffer, uptr length, bool blocking) {
  UNIMPLEMENTED();
}
1232 | |
1233 | u32 GetNumberOfCPUs() { |
1234 | SYSTEM_INFO sysinfo = {}; |
1235 | GetNativeSystemInfo(&sysinfo); |
1236 | return sysinfo.dwNumberOfProcessors; |
1237 | } |
1238 | |
1239 | #if SANITIZER_WIN_TRACE |
1240 | // TODO(mcgov): Rename this project-wide to PlatformLogInit |
1241 | void AndroidLogInit(void) { |
1242 | HRESULT hr = TraceLoggingRegister(g_asan_provider); |
1243 | if (!SUCCEEDED(hr)) |
1244 | return; |
1245 | } |
1246 | |
// No abort-message sink on Windows; the message is dropped.
void SetAbortMessage(const char *) {}
1248 | |
// Emits the full error report through the TraceLogging provider when
// log_to_syslog is enabled (ETW serves as the syslog analogue on Windows).
void LogFullErrorReport(const char *buffer) {
  if (common_flags()->log_to_syslog) {
    InternalMmapVector<wchar_t> filename;
    DWORD filename_length = 0;
    // Grow the buffer until GetModuleFileNameW no longer truncates the path.
    do {
      filename.resize(filename.size() + 0x100);
      filename_length =
          GetModuleFileNameW(NULL, filename.begin(), filename.size());
    } while (filename_length >= filename.size());
    TraceLoggingWrite(g_asan_provider, "AsanReportEvent" ,
                      TraceLoggingValue(filename.begin(), "ExecutableName" ),
                      TraceLoggingValue(buffer, "AsanReportContents" ));
  }
}
1263 | #endif // SANITIZER_WIN_TRACE |
1264 | |
// No platform-specific overrides of the common flags on Windows.
void InitializePlatformCommonFlags(CommonFlags *cf) {}
1266 | |
1267 | } // namespace __sanitizer |
1268 | |
1269 | #endif // _WIN32 |
1270 | |