//===-- tsan_rtl_thread.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"
namespace __tsan {

// ThreadContext implementation.

ThreadContext::ThreadContext(Tid tid) : ThreadContextBase(tid), thr(), sync() {}

#if !SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif

void ThreadContext::OnReset() { CHECK(!sync); }

#if !SANITIZER_GO
struct ThreadLeak {
  ThreadContext *tctx;
  int count;
};

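// Thread-registry callback: collects finished, non-detached threads as
// leaks, deduplicating by creation stack so each creation site is
// reported once with an aggregate count.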
static void CollectThreadLeaks(ThreadContextBase *tctx_base, void *arg) {
  auto &leaks = *static_cast<Vector<ThreadLeak> *>(arg);
  auto *tctx = static_cast<ThreadContext *>(tctx_base);
  if (tctx->detached || tctx->status != ThreadStatusFinished)
    return;
  for (uptr i = 0; i < leaks.Size(); i++) {
    if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
      leaks[i].count++;
      return;
    }
  }
  leaks.PushBack({tctx, 1});
}
#endif

// Disabled on Mac because lldb test TestTsanBasic fails:
// https://reviews.llvm.org/D112603#3163158
#if !SANITIZER_GO && !SANITIZER_APPLE
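// Reports every ignore region in `set` that was begun but never ended by
// the finishing thread `tctx`, then aborts the process.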
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
  if (tctx->tid == kMainTid) {
    Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
  } else {
    Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
           " created at:\n", tctx->tid, tctx->name);
    PrintStack(SymbolizeStackId(tctx->creation_stack_id));
  }
  Printf("  One of the following ignores was not ended"
         " (in order of probability)\n");
  for (uptr i = 0; i < set->Size(); i++) {
    Printf("  Ignore was enabled at:\n");
    PrintStack(SymbolizeStackId(set->At(i)));
  }
  Die();
}

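// Verifies that the thread does not finish with ignores still enabled.
// Skipped after a multithreaded fork, where ThreadStart intentionally
// re-enables ignores in the child (see below).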
static void ThreadCheckIgnore(ThreadState *thr) {
  if (ctx->after_multithreaded_fork)
    return;
  if (thr->ignore_reads_and_writes)
    ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
  if (thr->ignore_sync)
    ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
}
#else
static void ThreadCheckIgnore(ThreadState *thr) {}
#endif

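// Called during runtime finalization: checks the calling thread's ignores
// and reports threads that finished but were neither joined nor detached.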
void ThreadFinalize(ThreadState *thr) {
  ThreadCheckIgnore(thr);
#if !SANITIZER_GO
  if (!ShouldReport(thr, ReportTypeThreadLeak))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  Vector<ThreadLeak> leaks;
  ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
                                                      &leaks);
  for (uptr i = 0; i < leaks.Size(); i++) {
    ScopedReport rep(ReportTypeThreadLeak);
    rep.AddThread(leaks[i].tctx, true);
    rep.SetCount(leaks[i].count);
    OutputReport(thr, rep);
  }
#endif
}

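// Returns the number of threads currently alive in the registry.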
int ThreadCount(ThreadState *thr) {
  uptr result;
  ctx->thread_registry.GetNumberOfThreads(0, 0, &result);
  return (int)result;
}

struct OnCreatedArgs {
  VectorClock *sync;
  uptr sync_epoch;
  StackID stack;
};

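// Registers a new thread. Unless sync is ignored, the creating thread
// release-stores its vector clock into arg.sync so that ThreadStart can
// later acquire it and order thread creation before thread start.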
Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  // The main thread and GCD workers don't have a parent thread.
  Tid parent = kInvalidTid;
  OnCreatedArgs arg = {nullptr, 0, kInvalidStackID};
  if (thr) {
    parent = thr->tid;
    arg.stack = CurrentStackId(thr, pc);
    if (!thr->ignore_sync) {
      SlotLocker locker(thr);
      thr->clock.ReleaseStore(&arg.sync);
      arg.sync_epoch = ctx->global_epoch;
      IncrementEpoch(thr);
    }
  }
  Tid tid = ctx->thread_registry.CreateThread(uid, detached, parent, &arg);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent, tid, uid);
  return tid;
}

void ThreadContext::OnCreated(void *arg) {
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  sync = args->sync;
  sync_epoch = args->sync_epoch;
  creation_stack_id = args->stack;
}

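// Empty marker function; its address serves as the PC for the imitated
// stack-initialization writes performed in ThreadStart below.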
142extern "C" void __tsan_stack_initialization() {}
143
144struct OnStartedArgs {
145 ThreadState *thr;
146 uptr stk_addr;
147 uptr stk_size;
148 uptr tls_addr;
149 uptr tls_size;
150};
151
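// Second half of thread creation, running on the new thread: attaches a
// slot, acquires the clock released in ThreadCreate (if the global epoch
// still matches), records stack/TLS bounds, and imitates writes to both
// ranges so stale memory contents do not produce false races.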
void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
                 ThreadType thread_type) {
  ctx->thread_registry.StartThread(tid, os_id, thread_type, thr);
  if (!thr->ignore_sync) {
    SlotAttachAndLock(thr);
    if (thr->tctx->sync_epoch == ctx->global_epoch)
      thr->clock.Acquire(thr->tctx->sync);
    SlotUnlock(thr);
  }
  Free(thr->tctx->sync);

#if !SANITIZER_GO
  thr->is_inited = true;
#endif

  uptr stk_addr = 0;
  uptr stk_end = 0;
  uptr tls_addr = 0;
  uptr tls_end = 0;
#if !SANITIZER_GO
  if (thread_type != ThreadType::Fiber)
    GetThreadStackAndTls(tid == kMainTid, &stk_addr, &stk_end, &tls_addr,
                         &tls_end);
#endif
  uptr stk_size = stk_end - stk_addr;
  uptr tls_size = tls_end - tls_addr;
  thr->stk_addr = stk_addr;
  thr->stk_size = stk_size;
  thr->tls_addr = tls_addr;
  thr->tls_size = tls_size;

#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, 0);
    ThreadIgnoreSyncBegin(thr, 0);
  }
#endif

#if !SANITIZER_GO
  // Don't imitate stack/TLS writes for the main thread,
  // because its initialization is synchronized with all
  // subsequent threads anyway.
  if (tid != kMainTid) {
    if (stk_addr && stk_size) {
      const uptr pc = StackTrace::GetNextInstructionPc(
          reinterpret_cast<uptr>(__tsan_stack_initialization));
      MemoryRangeImitateWrite(thr, pc, stk_addr, stk_size);
    }

    if (tls_addr && tls_size)
      ImitateTlsWrite(thr, tls_addr, tls_size);
  }
#endif
}

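// Registry callback: placement-constructs the ThreadState in the storage
// passed through `arg` and links it to this context.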
void ThreadContext::OnStarted(void *arg) {
  DPrintf("#%d: ThreadStart\n", tid);
  thr = new (arg) ThreadState(tid);
  if (common_flags()->detect_deadlocks)
    thr->dd_lt = ctx->dd->CreateLogicalThread(tid);
  thr->tctx = this;
}

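// Tears down a finishing thread: release-stores its clock for a future
// joiner (unless detached), returns stack/TLS shadow to the OS, frees the
// shadow stack, and detaches from its slot.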
void ThreadFinish(ThreadState *thr) {
  DPrintf("#%d: ThreadFinish\n", thr->tid);
  ThreadCheckIgnore(thr);
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  thr->is_dead = true;
#if !SANITIZER_GO
  thr->is_inited = false;
  thr->ignore_interceptors++;
  PlatformCleanUpThreadState(thr);
#endif
  if (!thr->ignore_sync) {
    SlotLocker locker(thr);
    ThreadRegistryLock lock(&ctx->thread_registry);
    // Note: detached is protected by the thread registry mutex,
    // the thread may be detaching concurrently in another thread.
    if (!thr->tctx->detached) {
      thr->clock.ReleaseStore(&thr->tctx->sync);
      thr->tctx->sync_epoch = ctx->global_epoch;
      IncrementEpoch(thr);
    }
  }
#if !SANITIZER_GO
  UnmapOrDie(thr->shadow_stack, kShadowStackSize * sizeof(uptr));
#else
  Free(thr->shadow_stack);
#endif
  thr->shadow_stack = nullptr;
  thr->shadow_stack_pos = nullptr;
  thr->shadow_stack_end = nullptr;
  if (common_flags()->detect_deadlocks)
    ctx->dd->DestroyLogicalThread(thr->dd_lt);
  SlotDetach(thr);
  ctx->thread_registry.FinishThread(thr->tid);
  thr->~ThreadState();
}

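// Registry callback: moves the thread's local trace parts to the global
// recycle queue, throttles how many parts finished threads may retain,
// and freezes trace_pos into final_pos for later replay.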
void ThreadContext::OnFinished() {
  Lock lock(&ctx->slot_mtx);
  Lock lock1(&trace.mtx);
  // Queue all trace parts into the global recycle queue.
  auto parts = &trace.parts;
  while (trace.local_head) {
    CHECK(parts->Queued(trace.local_head));
    ctx->trace_part_recycle.PushBack(trace.local_head);
    trace.local_head = parts->Next(trace.local_head);
  }
  ctx->trace_part_recycle_finished += parts->Size();
  if (ctx->trace_part_recycle_finished > Trace::kFinishedThreadHi) {
    ctx->trace_part_finished_excess += parts->Size();
    trace.parts_allocated = 0;
  } else if (ctx->trace_part_recycle_finished > Trace::kFinishedThreadLo &&
             parts->Size() > 1) {
    ctx->trace_part_finished_excess += parts->Size() - 1;
    trace.parts_allocated = 1;
  }
  // From now on replay will use trace->final_pos.
  trace.final_pos = (Event *)atomic_load_relaxed(&thr->trace_pos);
  atomic_store_relaxed(&thr->trace_pos, 0);
  thr->tctx = nullptr;
  thr = nullptr;
}

struct ConsumeThreadContext {
  uptr uid;
  ThreadContextBase *tctx;
};

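// Maps a user-level thread id back to a Tid and removes the association
// from the registry (presumably so the user id can be reused).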
Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
  return ctx->thread_registry.ConsumeThreadUserId(uid);
}

struct JoinArg {
  VectorClock *sync;
  uptr sync_epoch;
};

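// Joins thread `tid` and acquires the clock it released in ThreadFinish,
// provided the global epoch has not advanced in the meantime.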
void ThreadJoin(ThreadState *thr, uptr pc, Tid tid) {
  CHECK_GT(tid, 0);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  JoinArg arg = {};
  ctx->thread_registry.JoinThread(tid, &arg);
  if (!thr->ignore_sync) {
    SlotLocker locker(thr);
    if (arg.sync_epoch == ctx->global_epoch)
      thr->clock.Acquire(arg.sync);
  }
  Free(arg.sync);
}

void ThreadContext::OnJoined(void *ptr) {
  auto arg = static_cast<JoinArg *>(ptr);
  arg->sync = sync;
  arg->sync_epoch = sync_epoch;
  sync = nullptr;
  sync_epoch = 0;
}

void ThreadContext::OnDead() { CHECK_EQ(sync, nullptr); }

void ThreadDetach(ThreadState *thr, uptr pc, Tid tid) {
  CHECK_GT(tid, 0);
  ctx->thread_registry.DetachThread(tid, thr);
}

void ThreadContext::OnDetached(void *arg) { Free(sync); }

void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid) {
  CHECK_GT(tid, 0);
  ctx->thread_registry.SetThreadUserId(tid, uid);
}

void ThreadSetName(ThreadState *thr, const char *name) {
  ctx->thread_registry.SetThreadName(thr->tid, name);
}

#if !SANITIZER_GO
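// Fiber support: user code drives these through the __tsan_*_fiber entry
// points. Each fiber gets its own ThreadState that is swapped in and out
// on switches.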
void FiberSwitchImpl(ThreadState *from, ThreadState *to) {
  Processor *proc = from->proc();
  ProcUnwire(proc, from);
  ProcWire(proc, to);
  set_cur_thread(to);
}

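// Creates a ThreadState for a new fiber, registering it as a detached
// thread; switches to it briefly so ThreadStart runs in its context.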
ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags) {
  void *mem = Alloc(sizeof(ThreadState));
  ThreadState *fiber = static_cast<ThreadState *>(mem);
  internal_memset(fiber, 0, sizeof(*fiber));
  Tid tid = ThreadCreate(thr, pc, 0, true);
  FiberSwitchImpl(thr, fiber);
  ThreadStart(fiber, tid, 0, ThreadType::Fiber);
  FiberSwitchImpl(fiber, thr);
  return fiber;
}

void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber) {
  FiberSwitchImpl(thr, fiber);
  ThreadFinish(fiber);
  FiberSwitchImpl(fiber, thr);
  Free(fiber);
}

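// Switches to `fiber`. Unless FiberSwitchFlagNoSync is given, pairs a
// Release before the switch with an Acquire after it, keyed on the fiber
// address, so events straddling the switch are ordered.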
void FiberSwitch(ThreadState *thr, uptr pc,
                 ThreadState *fiber, unsigned flags) {
  if (!(flags & FiberSwitchFlagNoSync))
    Release(thr, pc, (uptr)fiber);
  FiberSwitchImpl(thr, fiber);
  if (!(flags & FiberSwitchFlagNoSync))
    Acquire(fiber, pc, (uptr)fiber);
}
#endif

}  // namespace __tsan
