//===-- sanitizer_fuchsia.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA

#  include <pthread.h>
#  include <stdlib.h>
#  include <unistd.h>
#  include <zircon/errors.h>
#  include <zircon/process.h>
#  include <zircon/syscalls.h>
#  include <zircon/utc.h>

#  include "sanitizer_common.h"
#  include "sanitizer_interface_internal.h"
#  include "sanitizer_libc.h"
#  include "sanitizer_mutex.h"

namespace __sanitizer {

void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

uptr internal_sched_yield() {
  zx_status_t status = _zx_thread_legacy_yield(0u);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}

void internal_usleep(u64 useconds) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ZX_USEC(useconds)));
  CHECK_EQ(status, ZX_OK);
}

u64 NanoTime() {
  zx_handle_t utc_clock = _zx_utc_reference_get();
  CHECK_NE(utc_clock, ZX_HANDLE_INVALID);
  zx_time_t time;
  zx_status_t status = _zx_clock_read(utc_clock, &time);
  CHECK_EQ(status, ZX_OK);
  return time;
}

u64 MonotonicNanoTime() { return _zx_clock_get_monotonic(); }

uptr internal_getpid() {
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}

int internal_dlinfo(void *handle, int request, void *p) { UNIMPLEMENTED(); }

uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

tid_t GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);

  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}

void InitializePlatformEarly() {}
void CheckASLR() {}
void CheckMPROTECT() {}
void PlatformPrepareForSandboxing(void *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}

bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }

void FutexWait(atomic_uint32_t *p, u32 cmp) {
  zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(p), cmp,
                                      ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
  if (status != ZX_ERR_BAD_STATE)  // Normal race.
    CHECK_EQ(status, ZX_OK);
}

void FutexWake(atomic_uint32_t *p, u32 count) {
  zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(p), count);
  CHECK_EQ(status, ZX_OK);
}
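
// A minimal sketch (not part of this file's interface) of how the two wrappers
// above compose into a one-shot event; the OneShotEvent type and its fields
// are hypothetical and exist only for illustration:
//
//   struct OneShotEvent {
//     atomic_uint32_t signaled;  // Zero-initialized: not yet signaled.
//     void Wait() {
//       // Sleep only while the flag still reads 0; the kernel returns
//       // ZX_ERR_BAD_STATE (treated as a normal race above) if it doesn't.
//       while (atomic_load(&signaled, memory_order_acquire) == 0)
//         FutexWait(&signaled, 0);
//     }
//     void Signal() {
//       atomic_store(&signaled, 1, memory_order_release);
//       FutexWake(&signaled, UINT32_MAX);  // Wake every waiter.
//     }
//   };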

uptr GetPageSize() { return _zx_system_get_page_size(); }

uptr GetMmapGranularity() { return _zx_system_get_page_size(); }

sanitizer_shadow_bounds_t ShadowBounds;

void InitShadowBounds() { ShadowBounds = __sanitizer_shadow_bounds(); }

uptr GetMaxUserVirtualAddress() {
  InitShadowBounds();
  return ShadowBounds.memory_limit - 1;
}

uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }

bool ErrorIsOOM(error_t err) { return err == ZX_ERR_NO_MEMORY; }

// For any sanitizer internal that needs to map something which can be unmapped
// later, first attempt to map to a pre-allocated VMAR. This helps reduce
// fragmentation from many small anonymous mmap calls. A good value for this
// VMAR size would be the total size of your typical sanitizer internal objects
// allocated in an "average" process lifetime. Examples of this include:
// FakeStack, LowLevelAllocator mappings, TwoLevelMap, InternalMmapVector,
// StackStore, CreateAsanThread, etc.
//
// This is roughly equal to the total sum of sanitizer internal mappings for a
// large test case.
constexpr size_t kSanitizerHeapVmarSize = 13ULL << 20;
static zx_handle_t gSanitizerHeapVmar = ZX_HANDLE_INVALID;

static zx_status_t GetSanitizerHeapVmar(zx_handle_t *vmar) {
  zx_status_t status = ZX_OK;
  if (gSanitizerHeapVmar == ZX_HANDLE_INVALID) {
    CHECK_EQ(kSanitizerHeapVmarSize % GetPageSizeCached(), 0);
    uintptr_t base;
    status = _zx_vmar_allocate(
        _zx_vmar_root_self(),
        ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
        kSanitizerHeapVmarSize, &gSanitizerHeapVmar, &base);
  }
  *vmar = gSanitizerHeapVmar;
  if (status == ZX_OK)
    CHECK_NE(gSanitizerHeapVmar, ZX_HANDLE_INVALID);
  return status;
}

static zx_status_t TryVmoMapSanitizerVmar(zx_vm_option_t options,
                                          size_t vmar_offset, zx_handle_t vmo,
                                          size_t size, uintptr_t *addr,
                                          zx_handle_t *vmar_used = nullptr) {
  zx_handle_t vmar;
  zx_status_t status = GetSanitizerHeapVmar(&vmar);
  if (status != ZX_OK)
    return status;

  status = _zx_vmar_map(gSanitizerHeapVmar, options, vmar_offset, vmo,
                        /*vmo_offset=*/0, size, addr);
  if (vmar_used)
    *vmar_used = gSanitizerHeapVmar;
  if (status == ZX_ERR_NO_RESOURCES || status == ZX_ERR_INVALID_ARGS) {
    // This means there's no space in the heap VMAR, so fall back to the root
    // VMAR.
    status = _zx_vmar_map(_zx_vmar_root_self(), options, vmar_offset, vmo,
                          /*vmo_offset=*/0, size, addr);
    if (vmar_used)
      *vmar_used = _zx_vmar_root_self();
  }

  return status;
}
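
// Illustrative call pattern for the helper above (a sketch only;
// DoAnonymousMmapOrDie below is the real in-tree caller): create a VMO, let
// TryVmoMapSanitizerVmar place it in the heap VMAR (or the root VMAR on
// fallback), then close the VMO handle -- the mapping keeps the pages alive:
//
//   zx_handle_t vmo;
//   if (_zx_vmo_create(size, 0, &vmo) == ZX_OK) {
//     uintptr_t addr;
//     zx_handle_t vmar_used;
//     zx_status_t status =
//         TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
//                                /*vmar_offset=*/0, vmo, size, &addr,
//                                &vmar_used);
//     _zx_handle_close(vmo);
//     // On success, a later unmap must target vmar_used (see UnmapOrDieVmar).
//   }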

static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, GetPageSize());

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  uintptr_t addr;
  status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                                  /*vmar_offset=*/0, vmo, size, &addr);
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}

uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
  init_size = RoundUpTo(init_size, GetPageSize());
  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_handle_t vmar;
  zx_status_t status = _zx_vmar_allocate(
      _zx_vmar_root_self(),
      ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
      init_size, &vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
  base_ = reinterpret_cast<void *>(base);
  size_ = init_size;
  name_ = name;
  os_handle_ = vmar;

  return reinterpret_cast<uptr>(base_);
}

static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
                             void *base, const char *name, bool die_for_nomem) {
  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
  map_size = RoundUpTo(map_size, GetPageSize());
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
    return 0;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
  DCHECK_GE(base + size_, map_size + offset);
  uintptr_t addr;

  status =
      _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                   offset, vmo, 0, map_size, &addr);
  _zx_handle_close(vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
      ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
    }
    return 0;
  }
  IncreaseTotalMmap(map_size);
  return addr;
}

uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
                               const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name ? name : name_, false);
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
                                    const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name ? name : name_, true);
}

void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar,
                    bool raw_report) {
  if (!addr || !size)
    return;
  size = RoundUpTo(size, GetPageSize());

  zx_status_t status =
      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
  if (status == ZX_ERR_INVALID_ARGS && target_vmar == gSanitizerHeapVmar) {
    // If there wasn't any space in the heap VMAR, the fallback was the root
    // VMAR.
    status = _zx_vmar_unmap(_zx_vmar_root_self(),
                            reinterpret_cast<uintptr_t>(addr), size);
  }
  if (status != ZX_OK)
    ReportMunmapFailureAndDie(addr, size, status, raw_report);

  DecreaseTotalMmap(size);
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
  const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);
  if (addr == reinterpret_cast<uptr>(base_)) {
    if (size == size_) {
      // Destroying the vmar effectively unmaps the whole mapping.
      _zx_vmar_destroy(vmar);
      _zx_handle_close(vmar);
      os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);
      DecreaseTotalMmap(size);
      return;
    }
  } else {
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  }
  // Partial unmapping does not affect the fact that the initial range is still
  // reserved, and the resulting unmapped memory can't be reused.
  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar,
                 /*raw_report=*/false);
}
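
// Typical ReservedAddressRange lifecycle on Fuchsia (a sketch with
// illustrative sizes, not code from this file): Init reserves a child VMAR,
// Map/MapOrDie commit pages inside it on demand, and Unmap either destroys the
// whole VMAR (full range) or trims one end (partial range, front or back):
//
//   ReservedAddressRange range;
//   uptr base = range.Init(/*init_size=*/1 << 20, "example");  // Reserve 1M.
//   uptr page = range.MapOrDie(base, GetPageSize(), "example");  // Commit.
//   range.Unmap(page, GetPageSize());  // Trim the front; rest stays reserved.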

// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}

bool MprotectNoAccess(uptr addr, uptr size) {
  return _zx_vmar_protect(_zx_vmar_root_self(), 0, addr, size) == ZX_OK;
}

bool MprotectReadOnly(uptr addr, uptr size) {
  return _zx_vmar_protect(_zx_vmar_root_self(), ZX_VM_PERM_READ, addr, size) ==
         ZX_OK;
}

bool MprotectReadWrite(uptr addr, uptr size) {
  return _zx_vmar_protect(_zx_vmar_root_self(),
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, addr,
                          size) == ZX_OK;
}

void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, GetPageSize());
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size. Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
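  //
  // Worked example with illustrative numbers: for size = alignment = 64K,
  // map_size is 128K. If the mapping lands at map_addr = 0x100007000, then
  // addr = RoundUpTo(map_addr, 64K) = 0x100010000, so the 0x9000 bytes below
  // addr and the 0x7000 bytes above addr + size get unmapped, leaving exactly
  // the aligned 64K region backed by the start of the VMO.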
  size_t map_size = size + alignment;
  uintptr_t addr;
  zx_handle_t vmar_used;
  status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                                  /*vmar_offset=*/0, vmo, map_size, &addr,
                                  &vmar_used);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(vmar_used, ZX_INFO_VMAR, &info, sizeof(info),
                                   NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status = _zx_vmar_map(
            vmar_used,
            ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
            addr - info.base, vmo, 0, size, &new_addr);
        if (status == ZX_OK)
          CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(vmar_used, map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(vmar_used, end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void UnmapOrDie(void *addr, uptr size, bool raw_report) {
  UnmapOrDieVmar(addr, size, gSanitizerHeapVmar, raw_report);
}

void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
  uptr beg_aligned = RoundUpTo(beg, GetPageSize());
  uptr end_aligned = RoundDownTo(end, GetPageSize());
  if (beg_aligned < end_aligned) {
    zx_handle_t root_vmar = _zx_vmar_root_self();
    CHECK_NE(root_vmar, ZX_HANDLE_INVALID);
    zx_status_t status =
        _zx_vmar_op_range(root_vmar, ZX_VMAR_OP_DECOMMIT, beg_aligned,
                          end_aligned - beg_aligned, nullptr, 0);
    CHECK_EQ(status, ZX_OK);
  }
}

void DumpProcessMap() {
  // TODO(mcgrathr): write it
  return;
}

bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
  // Probe readability by copying the range into a scratch VMO: zx_vmo_write
  // returns an error instead of faulting if [beg, beg + size) is not readable.
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}

bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  *errno_p = ZX_ERR_NOT_SUPPORTED;
  return false;
}

void RawWrite(const char *buffer) {
  constexpr size_t size = 128;
  static _Thread_local char line[size];
  static _Thread_local size_t lastLineEnd = 0;
  static _Thread_local size_t cur = 0;

  while (*buffer) {
    if (cur >= size) {
      if (lastLineEnd == 0)
        lastLineEnd = size;
      __sanitizer_log_write(line, lastLineEnd);
      internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
      cur = cur - lastLineEnd;
      lastLineEnd = 0;
    }
    if (*buffer == '\n')
      lastLineEnd = cur + 1;
    line[cur++] = *buffer++;
  }
  // Flush all complete lines before returning.
  if (lastLineEnd != 0) {
    __sanitizer_log_write(line, lastLineEnd);
    internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
    cur = cur - lastLineEnd;
    lastLineEnd = 0;
  }
}
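
// Behavioral sketch of the buffering above (illustrative calls, not an API):
// complete lines are forwarded to the log as soon as they are seen, while a
// trailing partial line stays in the per-thread buffer until more output
// arrives or the buffer fills:
//
//   RawWrite("abc");      // Nothing logged yet; "abc" is buffered.
//   RawWrite("def\nxy");  // Logs "abcdef\n"; "xy" remains buffered.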

void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }
char **GetEnviron() { return StoredEnviron; }

const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  const char *argv0 = "<UNKNOWN>";
  if (StoredArgv && StoredArgv[0]) {
    argv0 = StoredArgv[0];
  }
  internal_strncpy(buf, argv0, buf_len);
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

uptr MainThreadStackBase, MainThreadStackSize;

bool GetRandom(void *buffer, uptr length, bool blocking) {
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
  _zx_cprng_draw(buffer, length);
  return true;
}

u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }

uptr GetRSS() { UNIMPLEMENTED(); }

void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) {}

void InitializePlatformCommonFlags(CommonFlags *cf) {}

}  // namespace __sanitizer

using namespace __sanitizer;

extern "C" {
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}

const char *__sanitizer_get_report_path() {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA