//===-- sanitizer_stack_store.cpp -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "sanitizer_stack_store.h"

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_leb128.h"
#include "sanitizer_lzw.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_stacktrace.h"

namespace __sanitizer {

namespace {
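// A trace is stored as a single header word followed by its frames. The
// header packs the frame count into the low kStackSizeBits bits and the
// trace tag into the bits above them; e.g. a 3-frame trace with tag 1
// round-trips through ToUptr() as (3 | (1 << 8)) == 0x103.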
struct StackTraceHeader {
  static constexpr u32 kStackSizeBits = 8;

  u8 size;
  u8 tag;
  explicit StackTraceHeader(const StackTrace &trace)
      : size(Min<uptr>(trace.size, (1u << kStackSizeBits) - 1)),
        tag(trace.tag) {
    CHECK_EQ(trace.tag, static_cast<uptr>(tag));
  }
  explicit StackTraceHeader(uptr h)
      : size(h & ((1 << kStackSizeBits) - 1)), tag(h >> kStackSizeBits) {}

  uptr ToUptr() const {
    return static_cast<uptr>(size) | (static_cast<uptr>(tag) << kStackSizeBits);
  }
};
}  // namespace

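// Stores the trace as one header word followed by h.size frame pointers and
// returns an Id for Load(). Id 0 is reserved for the empty trace and for
// allocation failure. *pack is incremented when this store fills a block up,
// telling the caller that Pack() now has work to do.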
StackStore::Id StackStore::Store(const StackTrace &trace, uptr *pack) {
  if (!trace.size && !trace.tag)
    return 0;
  StackTraceHeader h(trace);
  uptr idx = 0;
  *pack = 0;
  uptr *stack_trace = Alloc(h.size + 1, &idx, pack);
  // No more space.
  if (stack_trace == nullptr)
    return 0;
  *stack_trace = h.ToUptr();
  internal_memcpy(stack_trace + 1, trace.trace, h.size * sizeof(uptr));
  *pack += blocks_[GetBlockIdx(idx)].Stored(h.size + 1);
  return OffsetToId(idx);
}

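// Resolves an Id back to a StackTrace. The id is translated to a frame
// offset, the owning block is located (and transparently uncompressed if it
// was packed), and the header word is decoded into size and tag.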
StackTrace StackStore::Load(Id id) {
  if (!id)
    return {};
  uptr idx = IdToOffset(id);
  uptr block_idx = GetBlockIdx(idx);
  CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
  const uptr *stack_trace = blocks_[block_idx].GetOrUnpack(this);
  if (!stack_trace)
    return {};
  stack_trace += GetInBlockIdx(idx);
  StackTraceHeader h(*stack_trace);
  return StackTrace(stack_trace + 1, h.size, h.tag);
}

uptr StackStore::Allocated() const {
  return atomic_load_relaxed(&allocated_) + sizeof(*this);
}

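// Lock-free bump allocator over the frame space: a single atomic add on
// total_frames_ claims `count` consecutive frame slots. If the claimed range
// straddles a block boundary it cannot be used, because a trace must be
// contiguous within one block; the range is abandoned (but accounted as
// "stored" so the affected blocks can still be packed) and the allocation is
// retried.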
uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
  for (;;) {
    // Optimistic lock-free allocation, essentially try to bump the
    // total_frames_.
    uptr start = atomic_fetch_add(&total_frames_, count, memory_order_relaxed);
    uptr block_idx = GetBlockIdx(start);
    uptr last_idx = GetBlockIdx(start + count - 1);
    if (LIKELY(block_idx == last_idx)) {
      // Fits into a single block.
      // No more available blocks. Indicate inability to allocate more memory.
      if (block_idx >= ARRAY_SIZE(blocks_))
        return nullptr;
      *idx = start;
      return blocks_[block_idx].GetOrCreate(this) + GetInBlockIdx(start);
    }

    // Retry. We can't use a range allocated in two different blocks.
    CHECK_LE(count, kBlockSizeFrames);
    uptr in_first = kBlockSizeFrames - GetInBlockIdx(start);
    // Mark the tail/head of these blocks as "stored" to avoid waiting before
    // we can Pack().
    *pack += blocks_[block_idx].Stored(in_first);
    *pack += blocks_[last_idx].Stored(count - in_first);
  }
}

void *StackStore::Map(uptr size, const char *mem_type) {
  atomic_fetch_add(&allocated_, size, memory_order_relaxed);
  return MmapNoReserveOrDie(size, mem_type);
}

void StackStore::Unmap(void *addr, uptr size) {
  atomic_fetch_sub(&allocated_, size, memory_order_relaxed);
  UnmapOrDie(addr, size);
}

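// Compresses every block that is full and still in the Storing state.
// Returns the total number of bytes released back to the OS.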
uptr StackStore::Pack(Compression type) {
  uptr res = 0;
  for (BlockInfo &b : blocks_) res += b.Pack(type, this);
  return res;
}

void StackStore::LockAll() {
  for (BlockInfo &b : blocks_) b.Lock();
}

void StackStore::UnlockAll() {
  for (BlockInfo &b : blocks_) b.Unlock();
}

void StackStore::TestOnlyUnmap() {
  for (BlockInfo &b : blocks_) b.TestOnlyUnmap(this);
  internal_memset(this, 0, sizeof(*this));
}

uptr *StackStore::BlockInfo::Get() const {
  // Idiomatic double-checked locking uses memory_order_acquire here. But
  // relaxed is fine for us, justification is similar to
  // TwoLevelMap::GetOrCreate.
  return reinterpret_cast<uptr *>(atomic_load_relaxed(&data_));
}

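// Slow path of the double-checked locking in GetOrCreate(): re-check under
// the mutex and, if the block is still missing, map it and publish the
// pointer with a release store (paired with the relaxed load in Get(); see
// the comment there).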
uptr *StackStore::BlockInfo::Create(StackStore *store) {
  SpinMutexLock l(&mtx_);
  uptr *ptr = Get();
  if (!ptr) {
    ptr = reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStore"));
    atomic_store(&data_, reinterpret_cast<uptr>(ptr), memory_order_release);
  }
  return ptr;
}

uptr *StackStore::BlockInfo::GetOrCreate(StackStore *store) {
  uptr *ptr = Get();
  if (LIKELY(ptr))
    return ptr;
  return Create(store);
}

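// Stack frames are mostly return addresses, and neighboring values tend to
// be numerically close, so each value is stored as the signed LEB128
// encoding of its delta from the previous one. E.g. the sequence
// {0x40100, 0x40103, 0x40101} becomes the deltas {+0x40100, +3, -2}, where
// each small delta costs a single byte. The encoder/decoder expose
// output-/input-iterator interfaces so they can also serve as the byte
// stream for the LZW pass below.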
class SLeb128Encoder {
 public:
  SLeb128Encoder(u8 *begin, u8 *end) : begin(begin), end(end) {}

  bool operator==(const SLeb128Encoder &other) const {
    return begin == other.begin;
  }

  bool operator!=(const SLeb128Encoder &other) const {
    return begin != other.begin;
  }

  SLeb128Encoder &operator=(uptr v) {
    sptr diff = v - previous;
    begin = EncodeSLEB128(diff, begin, end);
    previous = v;
    return *this;
  }
  SLeb128Encoder &operator*() { return *this; }
  SLeb128Encoder &operator++() { return *this; }

  u8 *base() const { return begin; }

 private:
  u8 *begin;
  u8 *end;
  uptr previous = 0;
};

class SLeb128Decoder {
 public:
  SLeb128Decoder(const u8 *begin, const u8 *end) : begin(begin), end(end) {}

  bool operator==(const SLeb128Decoder &other) const {
    return begin == other.begin;
  }

  bool operator!=(const SLeb128Decoder &other) const {
    return begin != other.begin;
  }

  uptr operator*() {
    sptr diff;
    begin = DecodeSLEB128(begin, end, &diff);
    previous += diff;
    return previous;
  }
  SLeb128Decoder &operator++() { return *this; }

  SLeb128Decoder operator++(int) { return *this; }

 private:
  const u8 *begin;
  const u8 *end;
  uptr previous = 0;
};

static u8 *CompressDelta(const uptr *from, const uptr *from_end, u8 *to,
                         u8 *to_end) {
  SLeb128Encoder encoder(to, to_end);
  for (; from != from_end; ++from, ++encoder) *encoder = *from;
  return encoder.base();
}

static uptr *UncompressDelta(const u8 *from, const u8 *from_end, uptr *to,
                             uptr *to_end) {
  SLeb128Decoder decoder(from, from_end);
  SLeb128Decoder end(from_end, from_end);
  for (; decoder != end; ++to, ++decoder) *to = *decoder;
  CHECK_EQ(to, to_end);
  return to;
}

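// The LZW variant layers dictionary compression on top of the same byte
// stream: LzwEncode emits dictionary codes for repeated frame subsequences,
// and each code is then delta+SLEB128 encoded by the SLeb128Encoder, which
// it consumes as an output iterator.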
static u8 *CompressLzw(const uptr *from, const uptr *from_end, u8 *to,
                       u8 *to_end) {
  SLeb128Encoder encoder(to, to_end);
  encoder = LzwEncode<uptr>(from, from_end, encoder);
  return encoder.base();
}

static uptr *UncompressLzw(const u8 *from, const u8 *from_end, uptr *to,
                           uptr *to_end) {
  SLeb128Decoder decoder(from, from_end);
  SLeb128Decoder end(from_end, from_end);
  to = LzwDecode<uptr>(decoder, end, to);
  CHECK_EQ(to, to_end);
  return to;
}

#if defined(_MSC_VER) && !defined(__clang__)
#  pragma warning(push)
// Disable 'nonstandard extension used: zero-sized array in struct/union'.
#  pragma warning(disable : 4200)
#endif
namespace {
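// Layout of a packed block: the total packed size in bytes (header
// included), the compression algorithm used, and then the compressed
// payload as a flexible array member.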
struct PackedHeader {
  uptr size;
  StackStore::Compression type;
  u8 data[];
};
}  // namespace
#if defined(_MSC_VER) && !defined(__clang__)
#  pragma warning(pop)
#endif

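// Returns the uncompressed frame array for this block, decompressing it
// first if Pack() ran earlier. A block that is still being written to is
// downgraded from Storing to Unpacked, so it will not be packed while
// readers may be using it.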
uptr *StackStore::BlockInfo::GetOrUnpack(StackStore *store) {
  SpinMutexLock l(&mtx_);
  switch (state) {
    case State::Storing:
      state = State::Unpacked;
      FALLTHROUGH;
    case State::Unpacked:
      return Get();
    case State::Packed:
      break;
  }

  u8 *ptr = reinterpret_cast<u8 *>(Get());
  CHECK_NE(nullptr, ptr);
  const PackedHeader *header = reinterpret_cast<const PackedHeader *>(ptr);
  CHECK_LE(header->size, kBlockSizeBytes);
  CHECK_GE(header->size, sizeof(PackedHeader));

  uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());

  uptr *unpacked =
      reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStoreUnpack"));

  uptr *unpacked_end;
  switch (header->type) {
    case Compression::Delta:
      unpacked_end = UncompressDelta(header->data, ptr + header->size, unpacked,
                                     unpacked + kBlockSizeFrames);
      break;
    case Compression::LZW:
      unpacked_end = UncompressLzw(header->data, ptr + header->size, unpacked,
                                   unpacked + kBlockSizeFrames);
      break;
    default:
      UNREACHABLE("Unexpected type");
      break;
  }

  CHECK_EQ(kBlockSizeFrames, unpacked_end - unpacked);

  MprotectReadOnly(reinterpret_cast<uptr>(unpacked), kBlockSizeBytes);
  atomic_store(&data_, reinterpret_cast<uptr>(unpacked), memory_order_release);
  store->Unmap(ptr, packed_size_aligned);

  state = State::Unpacked;
  return Get();
}

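// Compresses a completely filled block. If compression would reclaim less
// than 1/8 of the block, the result is discarded and the block stays
// unpacked; otherwise the unused page-aligned tail and the original block
// are unmapped and the number of bytes saved is returned. Both the packed
// and unpacked images are mprotected read-only, since stored traces are
// immutable.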
uptr StackStore::BlockInfo::Pack(Compression type, StackStore *store) {
  if (type == Compression::None)
    return 0;

  SpinMutexLock l(&mtx_);
  switch (state) {
    case State::Unpacked:
    case State::Packed:
      return 0;
    case State::Storing:
      break;
  }

  uptr *ptr = Get();
  if (!ptr || !Stored(0))
    return 0;

  u8 *packed =
      reinterpret_cast<u8 *>(store->Map(kBlockSizeBytes, "StackStorePack"));
  PackedHeader *header = reinterpret_cast<PackedHeader *>(packed);
  u8 *alloc_end = packed + kBlockSizeBytes;

  u8 *packed_end = nullptr;
  switch (type) {
    case Compression::Delta:
      packed_end =
          CompressDelta(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
      break;
    case Compression::LZW:
      packed_end =
          CompressLzw(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
      break;
    default:
      UNREACHABLE("Unexpected type");
      break;
  }

  header->type = type;
  header->size = packed_end - packed;

  VPrintf(1, "Packed block of %zu KiB to %zu KiB\n", kBlockSizeBytes >> 10,
          header->size >> 10);

  if (kBlockSizeBytes - header->size < kBlockSizeBytes / 8) {
    VPrintf(1, "Undo and keep block unpacked\n");
    MprotectReadOnly(reinterpret_cast<uptr>(ptr), kBlockSizeBytes);
    store->Unmap(packed, kBlockSizeBytes);
    state = State::Unpacked;
    return 0;
  }

  uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());
  store->Unmap(packed + packed_size_aligned,
               kBlockSizeBytes - packed_size_aligned);
  MprotectReadOnly(reinterpret_cast<uptr>(packed), packed_size_aligned);

  atomic_store(&data_, reinterpret_cast<uptr>(packed), memory_order_release);
  store->Unmap(ptr, kBlockSizeBytes);

  state = State::Packed;
  return kBlockSizeBytes - packed_size_aligned;
}

void StackStore::BlockInfo::TestOnlyUnmap(StackStore *store) {
  if (uptr *ptr = Get())
    store->Unmap(ptr, kBlockSizeBytes);
}

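// Accounts for n more frames stored into this block and returns true exactly
// once: when the running count reaches kBlockSizeFrames, i.e. the block is
// completely filled. Pack() calls Stored(0) to test for fullness without
// changing the count. The release ordering keeps the preceding frame writes
// from being reordered past the counter update.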
bool StackStore::BlockInfo::Stored(uptr n) {
  return n + atomic_fetch_add(&stored_, n, memory_order_release) ==
         kBlockSizeFrames;
}

bool StackStore::BlockInfo::IsPacked() const {
  SpinMutexLock l(&mtx_);
  return state == State::Packed;
}

}  // namespace __sanitizer