//===-- sanitizer_stackdepot.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_stackdepot.h"

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_hash.h"
#include "sanitizer_mutex.h"
#include "sanitizer_stack_store.h"
#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {

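// One immutable node per unique stack trace. Only the 64-bit hash is
// compared in eq(); the frames themselves are never re-compared, so the
// depot relies on 64-bit hash equality to deduplicate stacks. `link` chains
// nodes within a bucket of the StackDepotBase hash table, and `store_id`
// refers to the frames kept in the shared StackStore below.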
struct StackDepotNode {
  using hash_type = u64;
  hash_type stack_hash;
  u32 link;
  StackStore::Id store_id;

  static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;

  typedef StackTrace args_type;
  bool eq(hash_type hash, const args_type &args) const {
    return hash == stack_hash;
  }
  static uptr allocated();
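  // 64-bit MurMur2 over the frames, seeded with the trace size in bytes; the
  // tag is mixed in last so otherwise-identical traces with different tags
  // hash to different values.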
  static hash_type hash(const args_type &args) {
    MurMur2Hash64Builder H(args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
    H.add(args.tag);
    return H.get();
  }
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
  void store(u32 id, const args_type &args, hash_type hash);
  args_type load(u32 id) const;
  static StackDepotHandle get_handle(u32 id);

  typedef StackDepotHandle handle_type;
};

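// Backing storage for the frames themselves. Nodes keep only compact ids
// into it, so the frame data can be packed and compressed independently of
// the depot's hash table.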
static StackStore stackStore;

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;
// Keep mutable data out of frequently accessed nodes to improve caching
// efficiency.
static TwoLevelMap<atomic_uint32_t, StackDepot::kNodesSize1,
                   StackDepot::kNodesSize2>
    useCounts;

int StackDepotHandle::use_count() const {
  return atomic_load_relaxed(&useCounts[id_]);
}

void StackDepotHandle::inc_use_count_unsafe() {
  atomic_fetch_add(&useCounts[id_], 1, memory_order_relaxed);
}

uptr StackDepotNode::allocated() {
  return stackStore.Allocated() + useCounts.MemoryUsage();
}

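// Pack the StackStore using the compression mode selected by the
// compress_stack_depot flag (its absolute value picks the algorithm) and,
// at verbosity >= 1, report how much memory the pass released.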
static void CompressStackStore() {
  u64 start = Verbosity() >= 1 ? MonotonicNanoTime() : 0;
  uptr diff = stackStore.Pack(static_cast<StackStore::Compression>(
      Abs(common_flags()->compress_stack_depot)));
  if (!diff)
    return;
  if (Verbosity() >= 1) {
    u64 finish = MonotonicNanoTime();
    uptr total_before = theDepot.GetStats().allocated + diff;
    VPrintf(1, "%s: StackDepot released %zu KiB out of %zu KiB in %llu ms\n",
            SanitizerToolName, diff >> 10, total_before >> 10,
            (finish - start) / 1000000);
  }
}

namespace {

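// Background compressor. The worker thread is started lazily on the first
// notification and then sleeps on a semaphore between work items; shutdown
// clears `run_` and posts the semaphore once more so WaitForWork() returns
// false.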
class CompressThread {
 public:
  constexpr CompressThread() = default;
  void NewWorkNotify();
  void Stop();
  void LockAndStop() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
  void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;

 private:
  enum class State {
    NotStarted = 0,
    Started,
    Failed,
    Stopped,
  };

  void Run();

  bool WaitForWork() {
    semaphore_.Wait();
    return atomic_load(&run_, memory_order_acquire);
  }

  Semaphore semaphore_ = {};
  StaticSpinMutex mutex_ = {};
  State state_ SANITIZER_GUARDED_BY(mutex_) = State::NotStarted;
  void *thread_ SANITIZER_GUARDED_BY(mutex_) = nullptr;
  atomic_uint8_t run_ = {};
};

static CompressThread compress_thread;

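// Called when there is new data to pack. A positive flag value hands the
// work to the background thread, starting it on first use; a negative value,
// or a failed thread start, falls through to compressing synchronously on
// the current thread.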
void CompressThread::NewWorkNotify() {
  int compress = common_flags()->compress_stack_depot;
  if (!compress)
    return;
  if (compress > 0 /* for testing or debugging */) {
    SpinMutexLock l(&mutex_);
    if (state_ == State::NotStarted) {
      atomic_store(&run_, 1, memory_order_release);
      CHECK_EQ(nullptr, thread_);
      thread_ = internal_start_thread(
          [](void *arg) -> void * {
            reinterpret_cast<CompressThread *>(arg)->Run();
            return nullptr;
          },
          this);
      state_ = thread_ ? State::Started : State::Failed;
    }
    if (state_ == State::Started) {
      semaphore_.Post();
      return;
    }
  }
  CompressStackStore();
}

void CompressThread::Run() {
  VPrintf(1, "%s: StackDepot compression thread started\n", SanitizerToolName);
  while (WaitForWork()) CompressStackStore();
  VPrintf(1, "%s: StackDepot compression thread stopped\n", SanitizerToolName);
}

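// Permanent shutdown: clear `run_`, post the semaphore so WaitForWork()
// returns false, and join. The thread handle is detached from the object
// under the mutex so the join itself runs unlocked.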
void CompressThread::Stop() {
  void *t = nullptr;
  {
    SpinMutexLock l(&mutex_);
    if (state_ != State::Started)
      return;
    state_ = State::Stopped;
    CHECK_NE(nullptr, thread_);
    t = thread_;
    thread_ = nullptr;
  }
  atomic_store(&run_, 0, memory_order_release);
  semaphore_.Post();
  internal_join_thread(t);
}

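// Fork support: join the worker while keeping mutex_ held, so the lock can
// stay taken across fork() and no child inherits a half-finished
// compression. Unlike Stop(), the state returns to NotStarted, allowing a
// restart after Unlock().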
void CompressThread::LockAndStop() {
  mutex_.Lock();
  if (state_ != State::Started)
    return;
  CHECK_NE(nullptr, thread_);

  atomic_store(&run_, 0, memory_order_release);
  semaphore_.Post();
  internal_join_thread(thread_);
  // Allow restarting after Unlock() if needed.
  state_ = State::NotStarted;
  thread_ = nullptr;
}

void CompressThread::Unlock() { mutex_.Unlock(); }

}  // namespace

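// StackStore::Store() sets *pack to non-zero once enough data has
// accumulated to be worth packing; only then is the compressor notified,
// keeping the common path cheap.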
void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
  stack_hash = hash;
  uptr pack = 0;
  store_id = stackStore.Store(args, &pack);
  if (LIKELY(!pack))
    return;
  compress_thread.NewWorkNotify();
}

StackDepotNode::args_type StackDepotNode::load(u32 id) const {
  if (!store_id)
    return {};
  return stackStore.Load(store_id);
}

StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }

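// Interning API: Put() returns a compact id that round-trips through
// StackDepotGet(); invalid (empty) traces map to the reserved id 0.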
u32 StackDepotPut(StackTrace stack) { return theDepot.Put(stack); }

StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return StackDepotNode::get_handle(theDepot.Put(stack));
}

StackTrace StackDepotGet(u32 id) {
  return theDepot.Get(id);
}

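// Quiesce everything before fork(): lock the depot, park the compression
// thread, then lock the store. UnlockAfterFork() releases in reverse order.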
void StackDepotLockBeforeFork() {
  theDepot.LockBeforeFork();
  compress_thread.LockAndStop();
  stackStore.LockAll();
}

void StackDepotUnlockAfterFork(bool fork_child) {
  stackStore.UnlockAll();
  compress_thread.Unlock();
  theDepot.UnlockAfterFork(fork_child);
}

void StackDepotPrintAll() {
#if !SANITIZER_GO
  theDepot.PrintAll();
#endif
}

void StackDepotStopBackgroundThread() { compress_thread.Stop(); }

StackDepotHandle StackDepotNode::get_handle(u32 id) {
  return StackDepotHandle(&theDepot.nodes[id], id);
}

void StackDepotTestOnlyUnmap() {
  theDepot.TestOnlyUnmap();
  stackStore.TestOnlyUnmap();
}

}  // namespace __sanitizer