1 | //===-- sanitizer_win.cpp -------------------------------------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file is shared between AddressSanitizer and ThreadSanitizer |
// run-time libraries and implements Windows-specific functions from
11 | // sanitizer_libc.h. |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "sanitizer_platform.h" |
15 | #if SANITIZER_WINDOWS |
16 | |
17 | #define WIN32_LEAN_AND_MEAN |
18 | #define NOGDI |
19 | #include <windows.h> |
20 | #include <io.h> |
21 | #include <psapi.h> |
22 | #include <stdlib.h> |
23 | |
24 | #include "sanitizer_common.h" |
25 | #include "sanitizer_file.h" |
26 | #include "sanitizer_libc.h" |
27 | #include "sanitizer_mutex.h" |
28 | #include "sanitizer_placement_new.h" |
29 | #include "sanitizer_win_defs.h" |
30 | |
31 | #if defined(PSAPI_VERSION) && PSAPI_VERSION == 1 |
32 | #pragma comment(lib, "psapi") |
33 | #endif |
34 | #if SANITIZER_WIN_TRACE |
35 | #include <traceloggingprovider.h> |
36 | // Windows trace logging provider init |
37 | #pragma comment(lib, "advapi32.lib") |
38 | TRACELOGGING_DECLARE_PROVIDER(g_asan_provider); |
39 | // GUID must be the same in utils/AddressSanitizerLoggingProvider.wprp |
TRACELOGGING_DEFINE_PROVIDER(g_asan_provider, "AddressSanitizerLoggingProvider",
                             (0x6c6c766d, 0x3846, 0x4e6a, 0xa4, 0xfb, 0x5b,
                              0x53, 0x0b, 0xd0, 0xf3, 0xfa));
43 | #else |
44 | #define TraceLoggingUnregister(x) |
45 | #endif |
46 | |
47 | // For WaitOnAddress |
48 | # pragma comment(lib, "synchronization.lib") |
49 | |
50 | // A macro to tell the compiler that this part of the code cannot be reached, |
51 | // if the compiler supports this feature. Since we're using this in |
52 | // code that is called when terminating the process, the expansion of the |
53 | // macro should not terminate the process to avoid infinite recursion. |
54 | #if defined(__clang__) |
55 | # define BUILTIN_UNREACHABLE() __builtin_unreachable() |
56 | #elif defined(__GNUC__) && \ |
57 | (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)) |
58 | # define BUILTIN_UNREACHABLE() __builtin_unreachable() |
59 | #elif defined(_MSC_VER) |
60 | # define BUILTIN_UNREACHABLE() __assume(0) |
61 | #else |
62 | # define BUILTIN_UNREACHABLE() |
63 | #endif |
64 | |
65 | namespace __sanitizer { |
66 | |
67 | #include "sanitizer_syscall_generic.inc" |
68 | |
69 | // --------------------- sanitizer_common.h |
70 | uptr GetPageSize() { |
71 | SYSTEM_INFO si; |
72 | GetSystemInfo(&si); |
73 | return si.dwPageSize; |
74 | } |
75 | |
76 | uptr GetMmapGranularity() { |
77 | SYSTEM_INFO si; |
78 | GetSystemInfo(&si); |
79 | return si.dwAllocationGranularity; |
80 | } |
81 | |
82 | uptr GetMaxUserVirtualAddress() { |
83 | SYSTEM_INFO si; |
84 | GetSystemInfo(&si); |
85 | return (uptr)si.lpMaximumApplicationAddress; |
86 | } |
87 | |
88 | uptr GetMaxVirtualAddress() { |
89 | return GetMaxUserVirtualAddress(); |
90 | } |
91 | |
92 | bool FileExists(const char *filename) { |
93 | return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES; |
94 | } |
95 | |
96 | bool DirExists(const char *path) { |
97 | auto attr = ::GetFileAttributesA(path); |
98 | return (attr != INVALID_FILE_ATTRIBUTES) && (attr & FILE_ATTRIBUTE_DIRECTORY); |
99 | } |
100 | |
101 | uptr internal_getpid() { |
102 | return GetProcessId(GetCurrentProcess()); |
103 | } |
104 | |
105 | int internal_dlinfo(void *handle, int request, void *p) { |
106 | UNIMPLEMENTED(); |
107 | } |
108 | |
109 | // In contrast to POSIX, on Windows GetCurrentThreadId() |
110 | // returns a system-unique identifier. |
111 | tid_t GetTid() { |
112 | return GetCurrentThreadId(); |
113 | } |
114 | |
115 | uptr GetThreadSelf() { |
116 | return GetTid(); |
117 | } |
118 | |
119 | #if !SANITIZER_GO |
120 | void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, |
121 | uptr *stack_bottom) { |
122 | CHECK(stack_top); |
123 | CHECK(stack_bottom); |
124 | MEMORY_BASIC_INFORMATION mbi; |
125 | CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0); |
126 | // FIXME: is it possible for the stack to not be a single allocation? |
127 | // Are these values what ASan expects to get (reserved, not committed; |
128 | // including stack guard page) ? |
129 | *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize; |
130 | *stack_bottom = (uptr)mbi.AllocationBase; |
131 | } |
132 | #endif // #if !SANITIZER_GO |
133 | |
134 | bool ErrorIsOOM(error_t err) { |
135 | // TODO: This should check which `err`s correspond to OOM. |
136 | return false; |
137 | } |
138 | |
139 | void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) { |
140 | void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); |
141 | if (rv == 0) |
    ReportMmapFailureAndDie(size, mem_type, "allocate",
                            GetLastError(), raw_report);
144 | return rv; |
145 | } |
146 | |
147 | void UnmapOrDie(void *addr, uptr size, bool raw_report) { |
148 | if (!size || !addr) |
149 | return; |
150 | |
151 | MEMORY_BASIC_INFORMATION mbi; |
152 | CHECK(VirtualQuery(addr, &mbi, sizeof(mbi))); |
153 | |
154 | // MEM_RELEASE can only be used to unmap whole regions previously mapped with |
155 | // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that |
156 | // fails try MEM_DECOMMIT. |
157 | if (VirtualFree(addr, 0, MEM_RELEASE) == 0) { |
158 | if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) { |
159 | ReportMunmapFailureAndDie(addr, size, GetLastError(), raw_report); |
160 | } |
161 | } |
162 | } |
163 | |
164 | static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type, |
165 | const char *mmap_type) { |
166 | error_t last_error = GetLastError(); |
167 | if (last_error == ERROR_NOT_ENOUGH_MEMORY) |
168 | return nullptr; |
169 | ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error); |
170 | } |
171 | |
172 | void *MmapOrDieOnFatalError(uptr size, const char *mem_type) { |
173 | void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); |
174 | if (rv == 0) |
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
176 | return rv; |
177 | } |
178 | |
179 | // We want to map a chunk of address space aligned to 'alignment'. |
180 | void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment, |
181 | const char *mem_type) { |
182 | CHECK(IsPowerOfTwo(size)); |
183 | CHECK(IsPowerOfTwo(alignment)); |
184 | |
185 | // Windows will align our allocations to at least 64K. |
186 | alignment = Max(alignment, GetMmapGranularity()); |
187 | |
188 | uptr mapped_addr = |
189 | (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); |
190 | if (!mapped_addr) |
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
192 | |
193 | // If we got it right on the first try, return. Otherwise, unmap it and go to |
194 | // the slow path. |
195 | if (IsAligned(mapped_addr, alignment)) |
196 | return (void*)mapped_addr; |
197 | if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0) |
    ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());
199 | |
200 | // If we didn't get an aligned address, overallocate, find an aligned address, |
201 | // unmap, and try to allocate at that aligned address. |
202 | int retries = 0; |
203 | const int kMaxRetries = 10; |
204 | for (; retries < kMaxRetries && |
205 | (mapped_addr == 0 || !IsAligned(mapped_addr, alignment)); |
206 | retries++) { |
207 | // Overallocate size + alignment bytes. |
208 | mapped_addr = |
209 | (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS); |
210 | if (!mapped_addr) |
      return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
212 | |
213 | // Find the aligned address. |
214 | uptr aligned_addr = RoundUpTo(mapped_addr, alignment); |
215 | |
216 | // Free the overallocation. |
217 | if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0) |
      ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());
219 | |
220 | // Attempt to allocate exactly the number of bytes we need at the aligned |
221 | // address. This may fail for a number of reasons, in which case we continue |
222 | // the loop. |
223 | mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size, |
224 | MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); |
225 | } |
226 | |
227 | // Fail if we can't make this work quickly. |
228 | if (retries == kMaxRetries && mapped_addr == 0) |
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
230 | |
231 | return (void *)mapped_addr; |
232 | } |
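
// Illustrative walk-through of the slow path above (the numbers are an
// example, not taken from a real run): with size = 0x20000 and
// alignment = 0x40000 on a system with 64K allocation granularity, we reserve
// size + alignment = 0x60000 bytes, say at 0x12350000, round up to the aligned
// address 0x12380000, release the whole over-allocation, and then try to
// reserve+commit exactly 0x20000 bytes at 0x12380000. Another thread may grab
// that range in the meantime, which is why the loop retries up to kMaxRetries
// times.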
233 | |
// ZeroMmapFixedRegion zeroes out a region of memory previously returned from a
// call to one of the MmapFixed* helpers. On non-Windows systems this would be
// done with another mmap, but on Windows remapping is not an option.
// VirtualFree(DECOMMIT)+VirtualAlloc(RECOMMIT) would also be a way to zero the
// memory, but we can't do this atomically, so instead we fall back to using
// internal_memset.
240 | bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) { |
241 | internal_memset((void*) fixed_addr, 0, size); |
242 | return true; |
243 | } |
244 | |
245 | bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) { |
246 | // FIXME: is this really "NoReserve"? On Win32 this does not matter much, |
247 | // but on Win64 it does. |
248 | (void)name; // unsupported |
249 | #if !SANITIZER_GO && SANITIZER_WINDOWS64 |
  // On ASan/Windows64, using MEM_COMMIT would result in error
  // 1455: ERROR_COMMITMENT_LIMIT.
  // ASan uses an exception handler to commit pages on demand.
253 | void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE); |
254 | #else |
255 | void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT, |
256 | PAGE_READWRITE); |
257 | #endif |
258 | if (p == 0) { |
259 | Report("ERROR: %s failed to " |
           "allocate %p (%zd) bytes at %p (error code: %d)\n",
261 | SanitizerToolName, size, size, fixed_addr, GetLastError()); |
262 | return false; |
263 | } |
264 | return true; |
265 | } |
266 | |
267 | bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size, const char *name) { |
  // FIXME: Windows supports large pages too. Might be worth checking.
269 | return MmapFixedNoReserve(fixed_addr, size, name); |
270 | } |
271 | |
272 | // Memory space mapped by 'MmapFixedOrDie' must have been reserved by |
273 | // 'MmapFixedNoAccess'. |
274 | void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) { |
275 | void *p = VirtualAlloc((LPVOID)fixed_addr, size, |
276 | MEM_COMMIT, PAGE_READWRITE); |
277 | if (p == 0) { |
278 | char mem_type[30]; |
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
282 | } |
283 | return p; |
284 | } |
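
// Hypothetical usage sketch for the reserve/commit pairing described above
// (the address and size are made up for illustration):
//
//   uptr addr = 0x300000000000;
//   uptr size = 1 << 20;
//   // Reserve the range without access rights first...
//   CHECK(MmapFixedNoAccess(addr, size, nullptr));
//   // ...then commit it as readable/writable when it is actually needed.
//   void *p = MmapFixedOrDie(addr, size, nullptr);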
285 | |
286 | // Uses fixed_addr for now. |
287 | // Will use offset instead once we've implemented this function for real. |
288 | uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size, const char *name) { |
289 | return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size)); |
290 | } |
291 | |
292 | uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size, |
293 | const char *name) { |
294 | return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size)); |
295 | } |
296 | |
297 | void ReservedAddressRange::Unmap(uptr addr, uptr size) { |
298 | // Only unmap if it covers the entire range. |
299 | CHECK((addr == reinterpret_cast<uptr>(base_)) && (size == size_)); |
300 | // We unmap the whole range, just null out the base. |
301 | base_ = nullptr; |
302 | size_ = 0; |
303 | UnmapOrDie(reinterpret_cast<void*>(addr), size); |
304 | } |
305 | |
306 | void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) { |
307 | void *p = VirtualAlloc((LPVOID)fixed_addr, size, |
308 | MEM_COMMIT, PAGE_READWRITE); |
309 | if (p == 0) { |
310 | char mem_type[30]; |
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
314 | } |
315 | return p; |
316 | } |
317 | |
318 | void *MmapNoReserveOrDie(uptr size, const char *mem_type) { |
319 | // FIXME: make this really NoReserve? |
320 | return MmapOrDie(size, mem_type); |
321 | } |
322 | |
323 | uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) { |
324 | base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size) : MmapNoAccess(size); |
325 | size_ = size; |
326 | name_ = name; |
327 | (void)os_handle_; // unsupported |
328 | return reinterpret_cast<uptr>(base_); |
329 | } |
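
// Sketch of the intended ReservedAddressRange lifecycle on Windows, for a
// hypothetical caller (sizes are illustrative only):
//
//   ReservedAddressRange range;
//   uptr base = range.Init(/*size=*/1 << 20, "example", /*fixed_addr=*/0);
//   range.MapOrDie(base, 1 << 16);   // commit a sub-range
//   range.Unmap(base, 1 << 20);      // release the whole reservation
//
// Note that Unmap() only supports releasing the entire range, as the CHECK in
// ReservedAddressRange::Unmap enforces.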
330 | |
331 | |
332 | void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) { |
333 | (void)name; // unsupported |
334 | void *res = VirtualAlloc((LPVOID)fixed_addr, size, |
335 | MEM_RESERVE, PAGE_NOACCESS); |
336 | if (res == 0) |
337 | Report("WARNING: %s failed to " |
           "mprotect %p (%zd) bytes at %p (error code: %d)\n",
339 | SanitizerToolName, size, size, fixed_addr, GetLastError()); |
340 | return res; |
341 | } |
342 | |
343 | void *MmapNoAccess(uptr size) { |
344 | void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS); |
345 | if (res == 0) |
346 | Report("WARNING: %s failed to " |
           "mprotect %p (%zd) bytes (error code: %d)\n",
348 | SanitizerToolName, size, size, GetLastError()); |
349 | return res; |
350 | } |
351 | |
352 | bool MprotectNoAccess(uptr addr, uptr size) { |
353 | DWORD old_protection; |
354 | return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection); |
355 | } |
356 | |
357 | bool MprotectReadOnly(uptr addr, uptr size) { |
358 | DWORD old_protection; |
359 | return VirtualProtect((LPVOID)addr, size, PAGE_READONLY, &old_protection); |
360 | } |
361 | |
362 | bool MprotectReadWrite(uptr addr, uptr size) { |
363 | DWORD old_protection; |
364 | return VirtualProtect((LPVOID)addr, size, PAGE_READWRITE, &old_protection); |
365 | } |
366 | |
367 | void ReleaseMemoryPagesToOS(uptr beg, uptr end) { |
368 | uptr beg_aligned = RoundDownTo(beg, GetPageSizeCached()), |
369 | end_aligned = RoundDownTo(end, GetPageSizeCached()); |
370 | CHECK(beg < end); // make sure the region is sane |
371 | if (beg_aligned == end_aligned) // make sure we're freeing at least 1 page; |
372 | return; |
373 | UnmapOrDie((void *)beg, end_aligned - beg_aligned); |
374 | } |
375 | |
376 | void SetShadowRegionHugePageMode(uptr addr, uptr size) { |
377 | // FIXME: probably similar to ReleaseMemoryToOS. |
378 | } |
379 | |
380 | bool DontDumpShadowMemory(uptr addr, uptr length) { |
381 | // This is almost useless on 32-bits. |
382 | // FIXME: add madvise-analog when we move to 64-bits. |
383 | return true; |
384 | } |
385 | |
386 | uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale, |
387 | uptr min_shadow_base_alignment, |
388 | UNUSED uptr &high_mem_end) { |
389 | const uptr granularity = GetMmapGranularity(); |
390 | const uptr alignment = |
391 | Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment); |
392 | const uptr left_padding = |
393 | Max<uptr>(granularity, 1ULL << min_shadow_base_alignment); |
394 | uptr space_size = shadow_size_bytes + left_padding; |
395 | uptr shadow_start = FindAvailableMemoryRange(space_size, alignment, |
396 | granularity, nullptr, nullptr); |
397 | CHECK_NE((uptr)0, shadow_start); |
398 | CHECK(IsAligned(shadow_start, alignment)); |
399 | return shadow_start; |
400 | } |
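
// Worked example for the alignment computation above, using typical ASan
// values (shadow_scale = 3, min_shadow_base_alignment = 0) on a system where
// GetMmapGranularity() returns 64K: alignment = Max(64K << 3, 1) = 512K and
// left_padding = Max(64K, 1) = 64K, so the shadow is placed on a 512K boundary
// with at least 64K of unreserved space below it. These numbers are
// illustrative, not a guarantee about any particular configuration.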
401 | |
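// Scans the address space upward from address 0 with VirtualQuery and returns
// an address inside the first MEM_FREE region that can hold |size| bytes at
// |alignment| after |left_padding| of padding. Note that this Windows
// implementation never fills in |largest_gap_found| or |max_occupied_addr|, so
// callers should not rely on those outputs here.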
402 | uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding, |
403 | uptr *largest_gap_found, |
404 | uptr *max_occupied_addr) { |
405 | uptr address = 0; |
406 | while (true) { |
407 | MEMORY_BASIC_INFORMATION info; |
408 | if (!::VirtualQuery((void*)address, &info, sizeof(info))) |
409 | return 0; |
410 | |
411 | if (info.State == MEM_FREE) { |
412 | uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding, |
413 | alignment); |
414 | if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize) |
415 | return shadow_address; |
416 | } |
417 | |
418 | // Move to the next region. |
419 | address = (uptr)info.BaseAddress + info.RegionSize; |
420 | } |
421 | return 0; |
422 | } |
423 | |
424 | uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size, |
425 | uptr num_aliases, uptr ring_buffer_size) { |
  CHECK(false && "HWASan aliasing is unimplemented on Windows");
427 | return 0; |
428 | } |
429 | |
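// Note: this inspects only the single region that contains |range_start|, so
// it reports the range as available only when that one region is PAGE_NOACCESS
// and extends at least to |range_end|.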
430 | bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) { |
431 | MEMORY_BASIC_INFORMATION mbi; |
432 | CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi))); |
433 | return mbi.Protect == PAGE_NOACCESS && |
434 | (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end; |
435 | } |
436 | |
437 | void *MapFileToMemory(const char *file_name, uptr *buff_size) { |
438 | UNIMPLEMENTED(); |
439 | } |
440 | |
441 | void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) { |
442 | UNIMPLEMENTED(); |
443 | } |
444 | |
445 | static const int kMaxEnvNameLength = 128; |
446 | static const DWORD kMaxEnvValueLength = 32767; |
447 | |
448 | namespace { |
449 | |
450 | struct EnvVariable { |
451 | char name[kMaxEnvNameLength]; |
452 | char value[kMaxEnvValueLength]; |
453 | }; |
454 | |
455 | } // namespace |
456 | |
457 | static const int kEnvVariables = 5; |
458 | static EnvVariable env_vars[kEnvVariables]; |
459 | static int num_env_vars; |
460 | |
461 | const char *GetEnv(const char *name) { |
462 | // Note: this implementation caches the values of the environment variables |
463 | // and limits their quantity. |
464 | for (int i = 0; i < num_env_vars; i++) { |
465 | if (0 == internal_strcmp(name, env_vars[i].name)) |
466 | return env_vars[i].value; |
467 | } |
468 | CHECK_LT(num_env_vars, kEnvVariables); |
469 | DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value, |
470 | kMaxEnvValueLength); |
471 | if (rv > 0 && rv < kMaxEnvValueLength) { |
472 | CHECK_LT(internal_strlen(name), kMaxEnvNameLength); |
473 | internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength); |
474 | num_env_vars++; |
475 | return env_vars[num_env_vars - 1].value; |
476 | } |
477 | return 0; |
478 | } |
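
// Because GetEnv() caches up to kEnvVariables name/value pairs on first use, a
// later SetEnvironmentVariableA() call for the same name is not observed by
// subsequent GetEnv() lookups. Illustrative (hypothetical) sequence:
//
//   SetEnvironmentVariableA("ASAN_OPTIONS", "verbosity=1");
//   const char *v1 = GetEnv("ASAN_OPTIONS");   // "verbosity=1", now cached
//   SetEnvironmentVariableA("ASAN_OPTIONS", "verbosity=2");
//   const char *v2 = GetEnv("ASAN_OPTIONS");   // still "verbosity=1"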
479 | |
480 | const char *GetPwd() { |
481 | UNIMPLEMENTED(); |
482 | } |
483 | |
484 | u32 GetUid() { |
485 | UNIMPLEMENTED(); |
486 | } |
487 | |
488 | namespace { |
489 | struct ModuleInfo { |
490 | const char *filepath; |
491 | uptr base_address; |
492 | uptr end_address; |
493 | }; |
494 | |
495 | #if !SANITIZER_GO |
496 | int CompareModulesBase(const void *pl, const void *pr) { |
497 | const ModuleInfo *l = (const ModuleInfo *)pl, *r = (const ModuleInfo *)pr; |
498 | if (l->base_address < r->base_address) |
499 | return -1; |
500 | return l->base_address > r->base_address; |
501 | } |
502 | #endif |
503 | } // namespace |
504 | |
505 | #if !SANITIZER_GO |
506 | void DumpProcessMap() { |
  Report("Dumping process modules:\n");
508 | ListOfModules modules; |
509 | modules.init(); |
510 | uptr num_modules = modules.size(); |
511 | |
512 | InternalMmapVector<ModuleInfo> module_infos(num_modules); |
513 | for (size_t i = 0; i < num_modules; ++i) { |
514 | module_infos[i].filepath = modules[i].full_name(); |
515 | module_infos[i].base_address = modules[i].ranges().front()->beg; |
516 | module_infos[i].end_address = modules[i].ranges().back()->end; |
517 | } |
518 | qsort(module_infos.data(), num_modules, sizeof(ModuleInfo), |
519 | CompareModulesBase); |
520 | |
521 | for (size_t i = 0; i < num_modules; ++i) { |
522 | const ModuleInfo &mi = module_infos[i]; |
523 | if (mi.end_address != 0) { |
      Printf("\t%p-%p %s\n", mi.base_address, mi.end_address,
             mi.filepath[0] ? mi.filepath : "[no name]");
    } else if (mi.filepath[0]) {
      Printf("\t??\?-??? %s\n", mi.filepath);
    } else {
      Printf("\t???\n");
530 | } |
531 | } |
532 | } |
533 | #endif |
534 | |
535 | void DisableCoreDumperIfNecessary() { |
536 | // Do nothing. |
537 | } |
538 | |
539 | void ReExec() { |
540 | UNIMPLEMENTED(); |
541 | } |
542 | |
543 | void PlatformPrepareForSandboxing(void *args) {} |
544 | |
545 | bool StackSizeIsUnlimited() { |
546 | UNIMPLEMENTED(); |
547 | } |
548 | |
549 | void SetStackSizeLimitInBytes(uptr limit) { |
550 | UNIMPLEMENTED(); |
551 | } |
552 | |
553 | bool AddressSpaceIsUnlimited() { |
554 | UNIMPLEMENTED(); |
555 | } |
556 | |
557 | void SetAddressSpaceUnlimited() { |
558 | UNIMPLEMENTED(); |
559 | } |
560 | |
561 | bool IsPathSeparator(const char c) { |
562 | return c == '\\' || c == '/'; |
563 | } |
564 | |
565 | static bool IsAlpha(char c) { |
566 | c = ToLower(c); |
567 | return c >= 'a' && c <= 'z'; |
568 | } |
569 | |
570 | bool IsAbsolutePath(const char *path) { |
571 | return path != nullptr && IsAlpha(path[0]) && path[1] == ':' && |
572 | IsPathSeparator(path[2]); |
573 | } |
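
// Per the check above, only drive-letter paths such as "C:\Temp\log.txt" or
// "c:/tmp/log.txt" are treated as absolute; UNC paths like "\\server\share\x"
// are not recognized and return false.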
574 | |
575 | void internal_usleep(u64 useconds) { Sleep(useconds / 1000); } |
576 | |
577 | u64 NanoTime() { |
578 | static LARGE_INTEGER frequency = {}; |
579 | LARGE_INTEGER counter; |
580 | if (UNLIKELY(frequency.QuadPart == 0)) { |
581 | QueryPerformanceFrequency(&frequency); |
582 | CHECK_NE(frequency.QuadPart, 0); |
583 | } |
584 | QueryPerformanceCounter(&counter); |
585 | counter.QuadPart *= 1000ULL * 1000000ULL; |
586 | counter.QuadPart /= frequency.QuadPart; |
587 | return counter.QuadPart; |
588 | } |
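
// Worked example for the conversion above (illustrative numbers): with a QPC
// frequency of 10,000,000 ticks per second -- a common value on recent Windows
// versions -- a counter reading of 25,000,000 becomes
// 25,000,000 * 1,000,000,000 / 10,000,000 = 2,500,000,000 ns, i.e. 2.5 s since
// the counter's epoch. Multiplying before dividing preserves sub-tick
// precision, but the 64-bit intermediate can overflow once the raw counter
// grows large enough, so treat these numbers as an illustration only.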
589 | |
590 | u64 MonotonicNanoTime() { return NanoTime(); } |
591 | |
592 | void Abort() { |
593 | internal__exit(3); |
594 | } |
595 | |
596 | bool CreateDir(const char *pathname) { |
597 | return CreateDirectoryA(pathname, nullptr) != 0; |
598 | } |
599 | |
600 | #if !SANITIZER_GO |
601 | // Read the file to extract the ImageBase field from the PE header. If ASLR is |
602 | // disabled and this virtual address is available, the loader will typically |
603 | // load the image at this address. Therefore, we call it the preferred base. Any |
604 | // addresses in the DWARF typically assume that the object has been loaded at |
605 | // this address. |
606 | static uptr GetPreferredBase(const char *modname, char *buf, size_t buf_size) { |
607 | fd_t fd = OpenFile(modname, RdOnly, nullptr); |
608 | if (fd == kInvalidFd) |
609 | return 0; |
610 | FileCloser closer(fd); |
611 | |
612 | // Read just the DOS header. |
613 | IMAGE_DOS_HEADER dos_header; |
614 | uptr bytes_read; |
615 | if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) || |
616 | bytes_read != sizeof(dos_header)) |
617 | return 0; |
618 | |
619 | // The file should start with the right signature. |
620 | if (dos_header.e_magic != IMAGE_DOS_SIGNATURE) |
621 | return 0; |
622 | |
623 | // The layout at e_lfanew is: |
624 | // "PE\0\0" |
625 | // IMAGE_FILE_HEADER |
626 | // IMAGE_OPTIONAL_HEADER |
627 | // Seek to e_lfanew and read all that data. |
628 | if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) == |
629 | INVALID_SET_FILE_POINTER) |
630 | return 0; |
631 | if (!ReadFromFile(fd, buf, buf_size, &bytes_read) || bytes_read != buf_size) |
632 | return 0; |
633 | |
634 | // Check for "PE\0\0" before the PE header. |
635 | char *pe_sig = &buf[0]; |
  if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0)
637 | return 0; |
638 | |
639 | // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted. |
640 | IMAGE_OPTIONAL_HEADER *pe_header = |
641 | (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER)); |
642 | |
643 | // Check for more magic in the PE header. |
644 | if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC) |
645 | return 0; |
646 | |
647 | // Finally, return the ImageBase. |
648 | return (uptr)pe_header->ImageBase; |
649 | } |
650 | |
651 | void ListOfModules::init() { |
652 | clearOrInit(); |
653 | HANDLE cur_process = GetCurrentProcess(); |
654 | |
655 | // Query the list of modules. Start by assuming there are no more than 256 |
656 | // modules and retry if that's not sufficient. |
657 | HMODULE *hmodules = 0; |
658 | uptr modules_buffer_size = sizeof(HMODULE) * 256; |
659 | DWORD bytes_required; |
660 | while (!hmodules) { |
661 | hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__); |
662 | CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size, |
663 | &bytes_required)); |
664 | if (bytes_required > modules_buffer_size) { |
665 | // Either there turned out to be more than 256 hmodules, or new hmodules |
666 | // could have loaded since the last try. Retry. |
667 | UnmapOrDie(hmodules, modules_buffer_size); |
668 | hmodules = 0; |
669 | modules_buffer_size = bytes_required; |
670 | } |
671 | } |
672 | |
673 | InternalMmapVector<char> buf(4 + sizeof(IMAGE_FILE_HEADER) + |
674 | sizeof(IMAGE_OPTIONAL_HEADER)); |
675 | InternalMmapVector<wchar_t> modname_utf16(kMaxPathLength); |
676 | InternalMmapVector<char> module_name(kMaxPathLength); |
  // |num_modules| is the number of modules actually present.
678 | size_t num_modules = bytes_required / sizeof(HMODULE); |
679 | for (size_t i = 0; i < num_modules; ++i) { |
680 | HMODULE handle = hmodules[i]; |
681 | MODULEINFO mi; |
682 | if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi))) |
683 | continue; |
684 | |
685 | // Get the UTF-16 path and convert to UTF-8. |
686 | int modname_utf16_len = |
687 | GetModuleFileNameW(handle, &modname_utf16[0], kMaxPathLength); |
688 | if (modname_utf16_len == 0) |
689 | modname_utf16[0] = '\0'; |
690 | int module_name_len = ::WideCharToMultiByte( |
691 | CP_UTF8, 0, &modname_utf16[0], modname_utf16_len + 1, &module_name[0], |
692 | kMaxPathLength, NULL, NULL); |
693 | module_name[module_name_len] = '\0'; |
694 | |
695 | uptr base_address = (uptr)mi.lpBaseOfDll; |
696 | uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage; |
697 | |
    // Adjust the base address of the module so that we get a VA instead of an
    // RVA when computing the module offset. This helps llvm-symbolizer find
    // the right DWARF CU. In the common case that the image is loaded at its
    // preferred address, we will now print normal virtual addresses.
702 | uptr preferred_base = |
703 | GetPreferredBase(&module_name[0], &buf[0], buf.size()); |
704 | uptr adjusted_base = base_address - preferred_base; |
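    // |adjusted_base| is effectively the load bias (actual base minus
    // preferred base): passing it as the module base below means that
    // "address - base" yields an offset relative to the preferred image base,
    // i.e. a link-time VA that the debug info understands.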
705 | |
706 | modules_.push_back(LoadedModule()); |
707 | LoadedModule &cur_module = modules_.back(); |
708 | cur_module.set(&module_name[0], adjusted_base); |
709 | // We add the whole module as one single address range. |
710 | cur_module.addAddressRange(base_address, end_address, /*executable*/ true, |
711 | /*writable*/ true); |
712 | } |
713 | UnmapOrDie(hmodules, modules_buffer_size); |
714 | } |
715 | |
716 | void ListOfModules::fallbackInit() { clear(); } |
717 | |
718 | // We can't use atexit() directly at __asan_init time as the CRT is not fully |
719 | // initialized at this point. Place the functions into a vector and use |
720 | // atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers). |
721 | InternalMmapVectorNoCtor<void (*)(void)> atexit_functions; |
722 | |
723 | static int queueAtexit(void (*function)(void)) { |
724 | atexit_functions.push_back(function); |
725 | return 0; |
726 | } |
727 | |
// If Atexit() is being called after RunAtexit() has already been run, it needs
// to be able to call atexit() directly. Here we use a function pointer to
// switch out its behaviour.
// An example of where this is needed is the asan_dynamic runtime on MinGW-w64.
// In this environment, __asan_init is called during the global constructor
// phase, well after the .CRT$XID initializer has run.
734 | static int (*volatile queueOrCallAtExit)(void (*)(void)) = &queueAtexit; |
735 | |
736 | int Atexit(void (*function)(void)) { return queueOrCallAtExit(function); } |
737 | |
738 | static int RunAtexit() { |
739 | TraceLoggingUnregister(g_asan_provider); |
740 | queueOrCallAtExit = &atexit; |
741 | int ret = 0; |
742 | for (uptr i = 0; i < atexit_functions.size(); ++i) { |
743 | ret |= atexit(atexit_functions[i]); |
744 | } |
745 | return ret; |
746 | } |
747 | |
748 | #pragma section(".CRT$XID", long, read) |
__declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit;
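
// Rough initialization order this relies on (a sketch, not a formal
// guarantee): the CRT runs the C initializers in the .CRT$XI* sections in
// section-name order, so the .CRT$XIC initializers set up the CRT (making
// atexit() usable), then our .CRT$XID entry runs RunAtexit(), which registers
// everything queued so far and flips queueOrCallAtExit to plain atexit() for
// later callers.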
750 | #endif |
751 | |
752 | // ------------------ sanitizer_libc.h |
753 | fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) { |
754 | // FIXME: Use the wide variants to handle Unicode filenames. |
755 | fd_t res; |
756 | if (mode == RdOnly) { |
757 | res = CreateFileA(filename, GENERIC_READ, |
758 | FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, |
759 | nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr); |
760 | } else if (mode == WrOnly) { |
761 | res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS, |
762 | FILE_ATTRIBUTE_NORMAL, nullptr); |
763 | } else { |
764 | UNIMPLEMENTED(); |
765 | } |
766 | CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd); |
767 | CHECK(res != kStderrFd || kStderrFd == kInvalidFd); |
768 | if (res == kInvalidFd && last_error) |
769 | *last_error = GetLastError(); |
770 | return res; |
771 | } |
772 | |
773 | void CloseFile(fd_t fd) { |
774 | CloseHandle(fd); |
775 | } |
776 | |
777 | bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read, |
778 | error_t *error_p) { |
779 | CHECK(fd != kInvalidFd); |
780 | |
781 | // bytes_read can't be passed directly to ReadFile: |
782 | // uptr is unsigned long long on 64-bit Windows. |
783 | unsigned long num_read_long; |
784 | |
785 | bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr); |
786 | if (!success && error_p) |
787 | *error_p = GetLastError(); |
788 | if (bytes_read) |
789 | *bytes_read = num_read_long; |
790 | return success; |
791 | } |
792 | |
793 | bool SupportsColoredOutput(fd_t fd) { |
794 | // FIXME: support colored output. |
795 | return false; |
796 | } |
797 | |
798 | bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written, |
799 | error_t *error_p) { |
800 | CHECK(fd != kInvalidFd); |
801 | |
802 | // Handle null optional parameters. |
803 | error_t dummy_error; |
804 | error_p = error_p ? error_p : &dummy_error; |
805 | uptr dummy_bytes_written; |
806 | bytes_written = bytes_written ? bytes_written : &dummy_bytes_written; |
807 | |
808 | // Initialize output parameters in case we fail. |
809 | *error_p = 0; |
810 | *bytes_written = 0; |
811 | |
812 | // Map the conventional Unix fds 1 and 2 to Windows handles. They might be |
813 | // closed, in which case this will fail. |
814 | if (fd == kStdoutFd || fd == kStderrFd) { |
815 | fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE); |
816 | if (fd == 0) { |
817 | *error_p = ERROR_INVALID_HANDLE; |
818 | return false; |
819 | } |
820 | } |
821 | |
822 | DWORD bytes_written_32; |
823 | if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) { |
824 | *error_p = GetLastError(); |
825 | return false; |
826 | } else { |
827 | *bytes_written = bytes_written_32; |
828 | return true; |
829 | } |
830 | } |
831 | |
832 | uptr internal_sched_yield() { |
833 | Sleep(0); |
834 | return 0; |
835 | } |
836 | |
837 | void internal__exit(int exitcode) { |
838 | TraceLoggingUnregister(g_asan_provider); |
839 | // ExitProcess runs some finalizers, so use TerminateProcess to avoid that. |
840 | // The debugger doesn't stop on TerminateProcess like it does on ExitProcess, |
841 | // so add our own breakpoint here. |
842 | if (::IsDebuggerPresent()) |
843 | __debugbreak(); |
844 | TerminateProcess(GetCurrentProcess(), exitcode); |
845 | BUILTIN_UNREACHABLE(); |
846 | } |
847 | |
848 | uptr internal_ftruncate(fd_t fd, uptr size) { |
849 | UNIMPLEMENTED(); |
850 | } |
851 | |
852 | uptr GetRSS() { |
853 | PROCESS_MEMORY_COUNTERS counters; |
854 | if (!GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters))) |
855 | return 0; |
856 | return counters.WorkingSetSize; |
857 | } |
858 | |
859 | void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; } |
860 | void internal_join_thread(void *th) { } |
861 | |
862 | void FutexWait(atomic_uint32_t *p, u32 cmp) { |
863 | WaitOnAddress(p, &cmp, sizeof(cmp), INFINITE); |
864 | } |
865 | |
866 | void FutexWake(atomic_uint32_t *p, u32 count) { |
867 | if (count == 1) |
868 | WakeByAddressSingle(p); |
869 | else |
870 | WakeByAddressAll(p); |
871 | } |
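
// Typical caller pattern for the wrappers above (a sketch; the real users,
// such as the Semaphore implementation, may differ):
//
//   // Wait until *p changes away from |expected|, tolerating spurious wakeups.
//   while (atomic_load(p, memory_order_acquire) == expected)
//     FutexWait(p, expected);
//   ...
//   atomic_store(p, new_value, memory_order_release);
//   FutexWake(p, 1);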
872 | |
873 | uptr GetTlsSize() { |
874 | return 0; |
875 | } |
876 | |
877 | void InitTlsSize() { |
878 | } |
879 | |
880 | void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, |
881 | uptr *tls_addr, uptr *tls_size) { |
882 | #if SANITIZER_GO |
883 | *stk_addr = 0; |
884 | *stk_size = 0; |
885 | *tls_addr = 0; |
886 | *tls_size = 0; |
887 | #else |
888 | uptr stack_top, stack_bottom; |
889 | GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom); |
890 | *stk_addr = stack_bottom; |
891 | *stk_size = stack_top - stack_bottom; |
892 | *tls_addr = 0; |
893 | *tls_size = 0; |
894 | #endif |
895 | } |
896 | |
897 | void ReportFile::Write(const char *buffer, uptr length) { |
898 | SpinMutexLock l(mu); |
899 | ReopenIfNecessary(); |
900 | if (!WriteToFile(fd, buffer, length)) { |
901 | // stderr may be closed, but we may be able to print to the debugger |
902 | // instead. This is the case when launching a program from Visual Studio, |
903 | // and the following routine should write to its console. |
904 | OutputDebugStringA(buffer); |
905 | } |
906 | } |
907 | |
908 | void SetAlternateSignalStack() { |
909 | // FIXME: Decide what to do on Windows. |
910 | } |
911 | |
912 | void UnsetAlternateSignalStack() { |
913 | // FIXME: Decide what to do on Windows. |
914 | } |
915 | |
916 | void InstallDeadlySignalHandlers(SignalHandlerType handler) { |
917 | (void)handler; |
918 | // FIXME: Decide what to do on Windows. |
919 | } |
920 | |
921 | HandleSignalMode GetHandleSignalMode(int signum) { |
922 | // FIXME: Decide what to do on Windows. |
923 | return kHandleSignalNo; |
924 | } |
925 | |
926 | // Check based on flags if we should handle this exception. |
927 | bool IsHandledDeadlyException(DWORD exceptionCode) { |
928 | switch (exceptionCode) { |
929 | case EXCEPTION_ACCESS_VIOLATION: |
930 | case EXCEPTION_ARRAY_BOUNDS_EXCEEDED: |
931 | case EXCEPTION_STACK_OVERFLOW: |
932 | case EXCEPTION_DATATYPE_MISALIGNMENT: |
933 | case EXCEPTION_IN_PAGE_ERROR: |
934 | return common_flags()->handle_segv; |
935 | case EXCEPTION_ILLEGAL_INSTRUCTION: |
936 | case EXCEPTION_PRIV_INSTRUCTION: |
937 | case EXCEPTION_BREAKPOINT: |
938 | return common_flags()->handle_sigill; |
939 | case EXCEPTION_FLT_DENORMAL_OPERAND: |
940 | case EXCEPTION_FLT_DIVIDE_BY_ZERO: |
941 | case EXCEPTION_FLT_INEXACT_RESULT: |
942 | case EXCEPTION_FLT_INVALID_OPERATION: |
943 | case EXCEPTION_FLT_OVERFLOW: |
944 | case EXCEPTION_FLT_STACK_CHECK: |
945 | case EXCEPTION_FLT_UNDERFLOW: |
946 | case EXCEPTION_INT_DIVIDE_BY_ZERO: |
947 | case EXCEPTION_INT_OVERFLOW: |
948 | return common_flags()->handle_sigfpe; |
949 | } |
950 | return false; |
951 | } |
952 | |
953 | bool IsAccessibleMemoryRange(uptr beg, uptr size) { |
954 | SYSTEM_INFO si; |
955 | GetNativeSystemInfo(&si); |
956 | uptr page_size = si.dwPageSize; |
957 | uptr page_mask = ~(page_size - 1); |
958 | |
959 | for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask; |
960 | page <= end;) { |
961 | MEMORY_BASIC_INFORMATION info; |
962 | if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info)) |
963 | return false; |
964 | |
965 | if (info.Protect == 0 || info.Protect == PAGE_NOACCESS || |
966 | info.Protect == PAGE_EXECUTE) |
967 | return false; |
968 | |
969 | if (info.RegionSize == 0) |
970 | return false; |
971 | |
972 | page += info.RegionSize; |
973 | } |
974 | |
975 | return true; |
976 | } |
977 | |
978 | bool SignalContext::IsStackOverflow() const { |
979 | return (DWORD)GetType() == EXCEPTION_STACK_OVERFLOW; |
980 | } |
981 | |
982 | void SignalContext::InitPcSpBp() { |
983 | EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo; |
984 | CONTEXT *context_record = (CONTEXT *)context; |
985 | |
986 | pc = (uptr)exception_record->ExceptionAddress; |
987 | # if SANITIZER_WINDOWS64 |
988 | # if SANITIZER_ARM64 |
989 | bp = (uptr)context_record->Fp; |
990 | sp = (uptr)context_record->Sp; |
991 | # else |
992 | bp = (uptr)context_record->Rbp; |
993 | sp = (uptr)context_record->Rsp; |
994 | # endif |
995 | # else |
996 | bp = (uptr)context_record->Ebp; |
997 | sp = (uptr)context_record->Esp; |
998 | # endif |
999 | } |
1000 | |
1001 | uptr SignalContext::GetAddress() const { |
1002 | EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo; |
1003 | if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) |
1004 | return exception_record->ExceptionInformation[1]; |
1005 | return (uptr)exception_record->ExceptionAddress; |
1006 | } |
1007 | |
1008 | bool SignalContext::IsMemoryAccess() const { |
1009 | return ((EXCEPTION_RECORD *)siginfo)->ExceptionCode == |
1010 | EXCEPTION_ACCESS_VIOLATION; |
1011 | } |
1012 | |
1013 | bool SignalContext::IsTrueFaultingAddress() const { return true; } |
1014 | |
1015 | SignalContext::WriteFlag SignalContext::GetWriteFlag() const { |
1016 | EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo; |
1017 | |
1018 | // The write flag is only available for access violation exceptions. |
1019 | if (exception_record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION) |
1020 | return SignalContext::Unknown; |
1021 | |
1022 | // The contents of this array are documented at |
1023 | // https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record |
1024 | // The first element indicates read as 0, write as 1, or execute as 8. The |
1025 | // second element is the faulting address. |
1026 | switch (exception_record->ExceptionInformation[0]) { |
1027 | case 0: |
1028 | return SignalContext::Read; |
1029 | case 1: |
1030 | return SignalContext::Write; |
1031 | case 8: |
1032 | return SignalContext::Unknown; |
1033 | } |
1034 | return SignalContext::Unknown; |
1035 | } |
1036 | |
1037 | void SignalContext::DumpAllRegisters(void *context) { |
1038 | // FIXME: Implement this. |
1039 | } |
1040 | |
1041 | int SignalContext::GetType() const { |
1042 | return static_cast<const EXCEPTION_RECORD *>(siginfo)->ExceptionCode; |
1043 | } |
1044 | |
1045 | const char *SignalContext::Describe() const { |
1046 | unsigned code = GetType(); |
1047 | // Get the string description of the exception if this is a known deadly |
1048 | // exception. |
1049 | switch (code) { |
    case EXCEPTION_ACCESS_VIOLATION:
      return "access-violation";
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
      return "array-bounds-exceeded";
    case EXCEPTION_STACK_OVERFLOW:
      return "stack-overflow";
    case EXCEPTION_DATATYPE_MISALIGNMENT:
      return "datatype-misalignment";
    case EXCEPTION_IN_PAGE_ERROR:
      return "in-page-error";
    case EXCEPTION_ILLEGAL_INSTRUCTION:
      return "illegal-instruction";
    case EXCEPTION_PRIV_INSTRUCTION:
      return "priv-instruction";
    case EXCEPTION_BREAKPOINT:
      return "breakpoint";
    case EXCEPTION_FLT_DENORMAL_OPERAND:
      return "flt-denormal-operand";
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
      return "flt-divide-by-zero";
    case EXCEPTION_FLT_INEXACT_RESULT:
      return "flt-inexact-result";
    case EXCEPTION_FLT_INVALID_OPERATION:
      return "flt-invalid-operation";
    case EXCEPTION_FLT_OVERFLOW:
      return "flt-overflow";
    case EXCEPTION_FLT_STACK_CHECK:
      return "flt-stack-check";
    case EXCEPTION_FLT_UNDERFLOW:
      return "flt-underflow";
    case EXCEPTION_INT_DIVIDE_BY_ZERO:
      return "int-divide-by-zero";
    case EXCEPTION_INT_OVERFLOW:
      return "int-overflow";
  }
  return "unknown exception";
1086 | } |
1087 | |
1088 | uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) { |
1089 | if (buf_len == 0) |
1090 | return 0; |
1091 | |
1092 | // Get the UTF-16 path and convert to UTF-8. |
1093 | InternalMmapVector<wchar_t> binname_utf16(kMaxPathLength); |
1094 | int binname_utf16_len = |
1095 | GetModuleFileNameW(NULL, &binname_utf16[0], kMaxPathLength); |
1096 | if (binname_utf16_len == 0) { |
1097 | buf[0] = '\0'; |
1098 | return 0; |
1099 | } |
1100 | int binary_name_len = |
1101 | ::WideCharToMultiByte(CP_UTF8, 0, &binname_utf16[0], binname_utf16_len, |
1102 | buf, buf_len, NULL, NULL); |
1103 | if ((unsigned)binary_name_len == buf_len) |
1104 | --binary_name_len; |
1105 | buf[binary_name_len] = '\0'; |
1106 | return binary_name_len; |
1107 | } |
1108 | |
1109 | uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) { |
1110 | return ReadBinaryName(buf, buf_len); |
1111 | } |
1112 | |
1113 | void CheckVMASize() { |
1114 | // Do nothing. |
1115 | } |
1116 | |
1117 | void InitializePlatformEarly() { |
1118 | // Do nothing. |
1119 | } |
1120 | |
1121 | void CheckASLR() { |
1122 | // Do nothing |
1123 | } |
1124 | |
1125 | void CheckMPROTECT() { |
1126 | // Do nothing |
1127 | } |
1128 | |
1129 | char **GetArgv() { |
1130 | // FIXME: Actually implement this function. |
1131 | return 0; |
1132 | } |
1133 | |
1134 | char **GetEnviron() { |
1135 | // FIXME: Actually implement this function. |
1136 | return 0; |
1137 | } |
1138 | |
1139 | pid_t StartSubprocess(const char *program, const char *const argv[], |
1140 | const char *const envp[], fd_t stdin_fd, fd_t stdout_fd, |
1141 | fd_t stderr_fd) { |
  // FIXME: implement on this platform.
  // Should be implemented based on
  // SymbolizerProcess::StartSymbolizerSubprocess
  // from lib/sanitizer_common/sanitizer_symbolizer_win.cpp.
1146 | return -1; |
1147 | } |
1148 | |
1149 | bool IsProcessRunning(pid_t pid) { |
1150 | // FIXME: implement on this platform. |
1151 | return false; |
1152 | } |
1153 | |
1154 | int WaitForProcess(pid_t pid) { return -1; } |
1155 | |
1156 | // FIXME implement on this platform. |
1157 | void GetMemoryProfile(fill_profile_f cb, uptr *stats) {} |
1158 | |
1159 | void CheckNoDeepBind(const char *filename, int flag) { |
1160 | // Do nothing. |
1161 | } |
1162 | |
1163 | // FIXME: implement on this platform. |
1164 | bool GetRandom(void *buffer, uptr length, bool blocking) { |
1165 | UNIMPLEMENTED(); |
1166 | } |
1167 | |
1168 | u32 GetNumberOfCPUs() { |
1169 | SYSTEM_INFO sysinfo = {}; |
1170 | GetNativeSystemInfo(&sysinfo); |
1171 | return sysinfo.dwNumberOfProcessors; |
1172 | } |
1173 | |
1174 | #if SANITIZER_WIN_TRACE |
1175 | // TODO(mcgov): Rename this project-wide to PlatformLogInit |
1176 | void AndroidLogInit(void) { |
1177 | HRESULT hr = TraceLoggingRegister(g_asan_provider); |
1178 | if (!SUCCEEDED(hr)) |
1179 | return; |
1180 | } |
1181 | |
1182 | void SetAbortMessage(const char *) {} |
1183 | |
1184 | void LogFullErrorReport(const char *buffer) { |
1185 | if (common_flags()->log_to_syslog) { |
1186 | InternalMmapVector<wchar_t> filename; |
1187 | DWORD filename_length = 0; |
1188 | do { |
1189 | filename.resize(filename.size() + 0x100); |
1190 | filename_length = |
1191 | GetModuleFileNameW(NULL, filename.begin(), filename.size()); |
1192 | } while (filename_length >= filename.size()); |
    TraceLoggingWrite(g_asan_provider, "AsanReportEvent",
                      TraceLoggingValue(filename.begin(), "ExecutableName"),
                      TraceLoggingValue(buffer, "AsanReportContents"));
1196 | } |
1197 | } |
1198 | #endif // SANITIZER_WIN_TRACE |
1199 | |
1200 | void InitializePlatformCommonFlags(CommonFlags *cf) {} |
1201 | |
1202 | } // namespace __sanitizer |
1203 | |
#endif  // SANITIZER_WINDOWS
1205 | |