//===-- asan_thread.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Thread-related code.
//===----------------------------------------------------------------------===//
#include "asan_thread.h"

#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

namespace __asan {

// AsanThreadContext implementation.

void AsanThreadContext::OnCreated(void *arg) {
  CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs *>(arg);
  if (args->stack)
    stack_id = StackDepotPut(*args->stack);
  thread = args->thread;
  thread->set_context(this);
}

void AsanThreadContext::OnFinished() {
  // Drop the link to the AsanThread object.
  thread = nullptr;
}

static ThreadRegistry *asan_thread_registry;
static ThreadArgRetval *thread_data;

static Mutex mu_for_thread_context;
// TODO(leonardchan@): It should be possible to make LowLevelAllocator
// thread-safe and consolidate this one into the GlobalLowLevelAllocator.
// We should be able to do something similar to what's in
// sanitizer_stack_store.cpp.
static LowLevelAllocator allocator_for_thread_context;

static ThreadContextBase *GetAsanThreadContext(u32 tid) {
  Lock lock(&mu_for_thread_context);
  return new (allocator_for_thread_context) AsanThreadContext(tid);
}

static void InitThreads() {
  static bool initialized;
  // Don't worry about thread safety - this is only called while there is
  // still a single thread.
  if (LIKELY(initialized))
    return;
  // Never reuse ASan threads: we store a pointer to the AsanThreadContext
  // in TSD and can't reliably tell when no more TSD destructors will
  // be called. It would be wrong to reuse an AsanThreadContext for another
  // thread before all TSD destructors have been called for it.

  // MIPS requires aligned addresses.
  static ALIGNED(alignof(
      ThreadRegistry)) char thread_registry_placeholder[sizeof(ThreadRegistry)];
  static ALIGNED(alignof(
      ThreadArgRetval)) char thread_data_placeholder[sizeof(ThreadArgRetval)];

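  // Note: placement-new into these static, suitably aligned buffers avoids
  // heap allocation and C++ static-initializer ordering issues during early
  // startup.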
  asan_thread_registry =
      new (thread_registry_placeholder) ThreadRegistry(GetAsanThreadContext);
  thread_data = new (thread_data_placeholder) ThreadArgRetval();
  initialized = true;
}

ThreadRegistry &asanThreadRegistry() {
  InitThreads();
  return *asan_thread_registry;
}

ThreadArgRetval &asanThreadArgRetval() {
  InitThreads();
  return *thread_data;
}

AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
  return static_cast<AsanThreadContext *>(
      asanThreadRegistry().GetThreadLocked(tid));
}

// AsanThread implementation.

AsanThread *AsanThread::Create(const void *start_data, uptr data_size,
                               u32 parent_tid, StackTrace *stack,
                               bool detached) {
  uptr PageSize = GetPageSizeCached();
  uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
  AsanThread *thread = (AsanThread *)MmapOrDie(size, __func__);
  if (data_size) {
    uptr available_size = (uptr)thread + size - (uptr)(thread->start_data_);
    CHECK_LE(data_size, available_size);
    internal_memcpy(thread->start_data_, start_data, data_size);
  }
  AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
  asanThreadRegistry().CreateThread(0, detached, parent_tid, &args);

  return thread;
}
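
// Illustrative only (a sketch, not code from this file): a caller such as a
// pthread_create interceptor might package its start routine and argument,
// copy them into the thread object via Create, and read them back on the
// child thread with GetStartData:
//   struct Args { void *(*routine)(void *); void *arg; };  // hypothetical
//   AsanThread *t = AsanThread::Create(&args, sizeof(args), parent_tid,
//                                      &stack, detached);
//   ...
//   Args args;
//   t->GetStartData(&args, sizeof(args));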

void AsanThread::GetStartData(void *out, uptr out_size) const {
  internal_memcpy(out, start_data_, out_size);
}

void AsanThread::TSDDtor(void *tsd) {
  AsanThreadContext *context = (AsanThreadContext *)tsd;
  VReport(1, "T%d TSDDtor\n", context->tid);
  if (context->thread)
    context->thread->Destroy();
}

void AsanThread::Destroy() {
  int tid = this->tid();
  VReport(1, "T%d exited\n", tid);

  bool was_running =
      (asanThreadRegistry().FinishThread(tid) == ThreadStatusRunning);
  if (was_running) {
    if (AsanThread *thread = GetCurrentThread())
      CHECK_EQ(this, thread);
    malloc_storage().CommitBack();
    if (common_flags()->use_sigaltstack)
      UnsetAlternateSignalStack();
    FlushToDeadThreadStats(&stats_);
    // We also clear the shadow on thread destruction because
    // some code may still be executing in later TSD destructors
    // and we don't want it to have any poisoned stack.
    ClearShadowForThreadStackAndTLS();
    DeleteFakeStack(tid);
  } else {
    CHECK_NE(this, GetCurrentThread());
  }
  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
  UnmapOrDie(this, size);
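  // NOTE: `this` has just been unmapped; from here on only stack locals such
  // as `was_running` may be used.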
  if (was_running)
    DTLS_Destroy();
}

void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
                                  uptr size) {
  if (atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: starting fiber switch while in fiber switch\n");
    Die();
  }

  next_stack_bottom_ = bottom;
  next_stack_top_ = bottom + size;
  atomic_store(&stack_switching_, 1, memory_order_release);

  FakeStack *current_fake_stack = fake_stack_;
  if (fake_stack_save)
    *fake_stack_save = fake_stack_;
  fake_stack_ = nullptr;
  SetTLSFakeStack(nullptr);
  // If fake_stack_save is null, the fiber is dying; destroy its fake stack.
  if (!fake_stack_save && current_fake_stack)
    current_fake_stack->Destroy(this->tid());
}

void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
                                   uptr *size_old) {
  if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: finishing a fiber switch that has not started\n");
    Die();
  }

  if (fake_stack_save) {
    SetTLSFakeStack(fake_stack_save);
    fake_stack_ = fake_stack_save;
  }

  if (bottom_old)
    *bottom_old = stack_bottom_;
  if (size_old)
    *size_old = stack_top_ - stack_bottom_;
  stack_bottom_ = next_stack_bottom_;
  stack_top_ = next_stack_top_;
  atomic_store(&stack_switching_, 0, memory_order_release);
  next_stack_top_ = 0;
  next_stack_bottom_ = 0;
}
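
// A sketch of the expected call protocol (see the public entry points
// __sanitizer_start_switch_fiber / __sanitizer_finish_switch_fiber at the
// bottom of this file): before transferring control to a fiber, the caller
// saves its fake stack and announces the destination stack; right after the
// context switch completes it restores state on the new stack:
//   void *fake_stack;
//   const void *old_bottom;
//   uptr old_size;
//   __sanitizer_start_switch_fiber(&fake_stack, fiber_bottom, fiber_size);
//   swap_context(...);  // hypothetical context-switch primitive
//   __sanitizer_finish_switch_fiber(fake_stack, &old_bottom, &old_size);
// Passing nullptr as the first argument to start_switch_fiber instead tells
// ASan that the current fiber is exiting for good, so its fake stack can be
// destroyed (see StartSwitchFiber above).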

inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
    // Make sure the stack bounds are fully initialized.
    if (stack_bottom_ >= stack_top_)
      return {0, 0};
    return {stack_bottom_, stack_top_};
  }
  char local;
  const uptr cur_stack = (uptr)&local;
  // Note: we need to check the next stack first, because FinishSwitchFiber
  // may be in the process of overwriting stack_top_/stack_bottom_. But in
  // that case we are already running on the next stack.
  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
    return {next_stack_bottom_, next_stack_top_};
  return {stack_bottom_, stack_top_};
}

uptr AsanThread::stack_top() { return GetStackBounds().top; }

uptr AsanThread::stack_bottom() { return GetStackBounds().bottom; }

uptr AsanThread::stack_size() {
  const auto bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}

// We want to create the FakeStack lazily on first use, but not before the
// stack size is known; the procedure also has to be async-signal safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
  uptr stack_size = this->stack_size();
  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
    return nullptr;
  uptr old_val = 0;
  // fake_stack_ has 3 states:
  // 0   -- not initialized
  // 1   -- being initialized
  // ptr -- initialized
  // This CAS checks if the state was 0 and if so changes it to state 1;
  // if that was successful, it initializes the pointer.
  if (atomic_compare_exchange_strong(
          reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
          memory_order_relaxed)) {
    uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
    CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
    stack_size_log =
        Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
    stack_size_log =
        Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
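    // E.g. an 8 MiB stack yields Log2(RoundUpToPowerOfTwo(8 MiB)) == 23,
    // which the two clamps above then confine to the flag-controlled range
    // [min_uar_stack_size_log, max_uar_stack_size_log].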
    fake_stack_ = FakeStack::Create(stack_size_log);
    DCHECK_EQ(GetCurrentThread(), this);
    SetTLSFakeStack(fake_stack_);
    return fake_stack_;
  }
  return nullptr;
}

void AsanThread::Init(const InitOptions *options) {
  DCHECK_NE(tid(), kInvalidTid);
  next_stack_top_ = next_stack_bottom_ = 0;
  atomic_store(&stack_switching_, false, memory_order_release);
  CHECK_EQ(this->stack_size(), 0U);
  SetThreadStackAndTls(options);
  if (stack_top_ != stack_bottom_) {
    CHECK_GT(this->stack_size(), 0U);
    CHECK(AddrIsInMem(stack_bottom_));
    CHECK(AddrIsInMem(stack_top_ - 1));
  }
  ClearShadowForThreadStackAndTLS();
  fake_stack_ = nullptr;
  if (__asan_option_detect_stack_use_after_return &&
      tid() == GetCurrentTidOrInvalid()) {
    // AsyncSignalSafeLazyInitFakeStack makes use of thread-locals and must be
    // called from the context of the thread it is initializing, not its
    // parent. Most platforms call AsanThread::Init on the newly-spawned
    // thread, but Fuchsia calls it from the parent thread. The tid check
    // above therefore skips this call when Init runs on the parent; the new
    // thread will initialize its fake stack when it first tries to use it.
    AsyncSignalSafeLazyInitFakeStack();
  }
  int local = 0;
  VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
          (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
          (void *)&local);
}

// Fuchsia doesn't use ThreadStart.
// asan_fuchsia.cpp defines CreateMainThread and SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA

void AsanThread::ThreadStart(tid_t os_id) {
  Init();
  asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);

  if (common_flags()->use_sigaltstack)
    SetAlternateSignalStack();
}

AsanThread *CreateMainThread() {
  AsanThread *main_thread = AsanThread::Create(
      /* parent_tid */ kMainTid,
      /* stack */ nullptr, /* detached */ true);
  SetCurrentThread(main_thread);
  main_thread->ThreadStart(internal_getpid());
  return main_thread;
}

// This implementation doesn't use the argument, which is just passed down
// from the caller of Init (see above). It's only there to support
// OS-specific implementations that need more information passed through.
void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
  DCHECK_EQ(options, nullptr);
  uptr tls_size = 0;
  uptr stack_size = 0;
  GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
                       &tls_begin_, &tls_size);
  stack_top_ = RoundDownTo(stack_bottom_ + stack_size, ASAN_SHADOW_GRANULARITY);
  stack_bottom_ = RoundDownTo(stack_bottom_, ASAN_SHADOW_GRANULARITY);
  tls_end_ = tls_begin_ + tls_size;
  dtls_ = DTLS_Get();

  if (stack_top_ != stack_bottom_) {
    int local;
    CHECK(AddrIsInStack((uptr)&local));
  }
}

#endif  // !SANITIZER_FUCHSIA

void AsanThread::ClearShadowForThreadStackAndTLS() {
  if (stack_top_ != stack_bottom_)
    PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_) {
    uptr tls_begin_aligned = RoundDownTo(tls_begin_, ASAN_SHADOW_GRANULARITY);
    uptr tls_end_aligned = RoundUpTo(tls_end_, ASAN_SHADOW_GRANULARITY);
    FastPoisonShadow(tls_begin_aligned, tls_end_aligned - tls_begin_aligned, 0);
  }
}
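
// Worked example of the rounding above, assuming the default 8-byte shadow
// granularity: a TLS range [0x1005, 0x2003) widens to [0x1000, 0x2008), so
// every shadow byte that maps any part of the range is cleared.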

bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
                                           StackFrameAccess *access) {
  if (stack_top_ == stack_bottom_)
    return false;

  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (FakeStack *fake_stack = get_fake_stack()) {
    bottom = fake_stack->AddrIsInFakeStack(addr);
    CHECK(bottom);
    access->offset = addr - bottom;
    access->frame_pc = ((uptr *)bottom)[2];
    access->frame_descr = (const char *)((uptr *)bottom)[1];
    return true;
  }
  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  uptr mem_ptr = RoundDownTo(aligned_addr, ASAN_SHADOW_GRANULARITY);
  u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8 *)MemToShadow(bottom);

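  // Walk the shadow backwards until we reach the left redzone of the frame
  // that contains `addr`.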
  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr != kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= ASAN_SHADOW_GRANULARITY;
  }

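  // Then skip over the left redzone itself, so that mem_ptr lands one granule
  // below the frame header.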
  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr == kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= ASAN_SHADOW_GRANULARITY;
  }

  if (shadow_ptr < shadow_bottom) {
    return false;
  }

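  // The instrumented frame starts with the header words
  // [frame magic, frame description, frame PC], which is why ptr[1] and
  // ptr[2] below yield the description string and the PC.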
  uptr *ptr = (uptr *)(mem_ptr + ASAN_SHADOW_GRANULARITY);
  CHECK(ptr[0] == kCurrentStackFrameMagic);
  access->offset = addr - (uptr)ptr;
  access->frame_pc = ptr[2];
  access->frame_descr = (const char *)ptr[1];
  return true;
}

uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (FakeStack *fake_stack = get_fake_stack()) {
    bottom = fake_stack->AddrIsInFakeStack(addr);
    if (bottom == 0) {
      return 0;
    }
  } else {
    return 0;
  }

  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8 *)MemToShadow(bottom);

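  // Scan the shadow backwards until any stack redzone marker is found; the
  // variable's shadow then starts one byte past it.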
  while (shadow_ptr >= shadow_bottom &&
         (*shadow_ptr != kAsanStackLeftRedzoneMagic &&
          *shadow_ptr != kAsanStackMidRedzoneMagic &&
          *shadow_ptr != kAsanStackRightRedzoneMagic))
    shadow_ptr--;

  return (uptr)shadow_ptr + 1;
}

bool AsanThread::AddrIsInStack(uptr addr) {
  const auto bounds = GetStackBounds();
  return addr >= bounds.bottom && addr < bounds.top;
}

static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
                                       void *addr) {
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(tctx_base);
  AsanThread *t = tctx->thread;
  if (!t)
    return false;
  if (t->AddrIsInStack((uptr)addr))
    return true;
  FakeStack *fake_stack = t->get_fake_stack();
  if (!fake_stack)
    return false;
  return fake_stack->AddrIsInFakeStack((uptr)addr);
}

AsanThread *GetCurrentThread() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (!context) {
    if (SANITIZER_ANDROID) {
      // On Android, the libc constructor is called _after_ asan_init, and it
      // cleans up TSD. Try to figure out whether this is still the main
      // thread by the stack address. We are not entirely sure that we have
      // the correct main thread limits, so only do this magic on Android,
      // and only if the found thread is the main thread.
      AsanThreadContext *tctx = GetThreadContextByTidLocked(kMainTid);
      if (tctx && ThreadStackContainsAddress(tctx, &context)) {
        SetCurrentThread(tctx->thread);
        return tctx->thread;
      }
    }
    return nullptr;
  }
  return context->thread;
}

void SetCurrentThread(AsanThread *t) {
  CHECK(t->context());
  VReport(2, "SetCurrentThread: %p for thread %p\n", (void *)t->context(),
          (void *)GetThreadSelf());
  // Make sure we do not reset the current AsanThread.
  CHECK_EQ(0, AsanTSDGet());
  AsanTSDSet(t->context());
  CHECK_EQ(t->context(), AsanTSDGet());
}

u32 GetCurrentTidOrInvalid() {
  AsanThread *t = GetCurrentThread();
  return t ? t->tid() : kInvalidTid;
}

AsanThread *FindThreadByStackAddress(uptr addr) {
  asanThreadRegistry().CheckLocked();
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
      asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
                                                   (void *)addr));
  return tctx ? tctx->thread : nullptr;
}

void EnsureMainThreadIDIsCorrect() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (context && (context->tid == kMainTid))
    context->os_id = GetTid();
}

__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
  __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
      __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
  if (!context)
    return nullptr;
  return context->thread;
}
}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockThreads() {
  __asan::asanThreadRegistry().Lock();
  __asan::asanThreadArgRetval().Lock();
}

void UnlockThreads() {
  __asan::asanThreadArgRetval().Unlock();
  __asan::asanThreadRegistry().Unlock();
}

static ThreadRegistry *GetAsanThreadRegistryLocked() {
  __asan::asanThreadRegistry().CheckLocked();
  return &__asan::asanThreadRegistry();
}

void EnsureMainThreadIDIsCorrect() { __asan::EnsureMainThreadIDIsCorrect(); }

bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t)
    return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // ASan doesn't keep allocator caches in TLS, so these are unused.
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}

void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}

void GetThreadExtraStackRangesLocked(tid_t os_id,
                                     InternalMmapVector<Range> *ranges) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t)
    return;
  __asan::FakeStack *fake_stack = t->get_fake_stack();
  if (!fake_stack)
    return;

  fake_stack->ForEachFakeFrame(
      [](uptr begin, uptr end, void *arg) {
        reinterpret_cast<InternalMmapVector<Range> *>(arg)->push_back(
            {begin, end});
      },
      ranges);
}

void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {
  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      [](ThreadContextBase *tctx, void *arg) {
        GetThreadExtraStackRangesLocked(
            tctx->os_id, reinterpret_cast<InternalMmapVector<Range> *>(arg));
      },
      ranges);
}

void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
  __asan::asanThreadArgRetval().GetAllPtrsLocked(ptrs);
}

void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      [](ThreadContextBase *tctx, void *threads) {
        if (tctx->status == ThreadStatusRunning)
          reinterpret_cast<InternalMmapVector<tid_t> *>(threads)->push_back(
              tctx->os_id);
      },
      threads);
}

}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
                                    uptr size) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__sanitizer_start_switch_fiber called from unknown thread\n");
    return;
  }
  t->StartSwitchFiber((FakeStack **)fakestacksave, (uptr)bottom, size);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void *fakestack, const void **bottom_old,
                                     uptr *size_old) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__sanitizer_finish_switch_fiber called from unknown thread\n");
    return;
  }
  t->FinishSwitchFiber((FakeStack *)fakestack, (uptr *)bottom_old,
                       (uptr *)size_old);
}
}