1 | //===-- tsd.h ---------------------------------------------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | #ifndef SCUDO_TSD_H_ |
10 | #define SCUDO_TSD_H_ |
11 | |
12 | #include "atomic_helpers.h" |
13 | #include "common.h" |
14 | #include "mutex.h" |
15 | #include "thread_annotations.h" |
16 | |
17 | #include <limits.h> // for PTHREAD_DESTRUCTOR_ITERATIONS |
18 | #include <pthread.h> |
19 | |
20 | // With some build setups, this might still not be defined. |
21 | #ifndef PTHREAD_DESTRUCTOR_ITERATIONS |
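// 4 is the minimum mandated by POSIX (_POSIX_THREAD_DESTRUCTOR_ITERATIONS).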
22 | #define PTHREAD_DESTRUCTOR_ITERATIONS 4 |
23 | #endif |
24 | |
25 | namespace scudo { |
26 | |
27 | template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD { |
28 | using ThisT = TSD<Allocator>; |
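  // Countdown for the pthread key destructor: init() arms it with
  // PTHREAD_DESTRUCTOR_ITERATIONS, and the platform-specific teardown code
  // can decrement it on each destructor pass before finally committing the
  // TSD back.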
29 | u8 DestructorIterations = 0; |
30 | |
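  // Called once per TSD; the DCHECKs guard against double initialization and
  // against a misaligned instance, since TSDs are cache-line aligned.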
31 | void init(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS { |
32 | DCHECK_EQ(DestructorIterations, 0U); |
33 | DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT))); |
34 | Instance->initCache(&Cache); |
35 | DestructorIterations = PTHREAD_DESTRUCTOR_ITERATIONS; |
36 | } |
37 | |
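  // `Precedence` tracks contention on this TSD: it stays 0 while the TSD is
  // uncontended, and is set to a coarse monotonic timestamp the first time
  // tryLock() fails while it is 0. Locking the TSD (or successfully
  // try-locking it) resets it to 0.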
  inline bool tryLock() NO_THREAD_SAFETY_ANALYSIS {
    if (Mutex.tryLock()) {
      atomic_store_relaxed(&Precedence, 0);
      return true;
    }
    if (atomic_load_relaxed(&Precedence) == 0)
      atomic_store_relaxed(
          &Precedence,
          static_cast<uptr>(getMonotonicTime() >> FIRST_32_SECOND_64(16, 0)));
    return false;
  }
  inline void lock() NO_THREAD_SAFETY_ANALYSIS {
    atomic_store_relaxed(&Precedence, 0);
    Mutex.lock();
  }
  inline void unlock() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
  inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
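  // A shared registry looking for a TSD on its slow path might use the
  // methods above roughly as follows (an illustrative sketch with made-up
  // names, not the actual registry code):
  //
  //   TSD<Allocator> *Candidate = nullptr;
  //   uptr LowestPrecedence = ~static_cast<uptr>(0);
  //   for (TSD<Allocator> &T : TSDs) {
  //     if (T.tryLock())
  //       return &T; // Uncontended, take it.
  //     const uptr P = T.getPrecedence();
  //     if (P != 0 && P < LowestPrecedence) {
  //       Candidate = &T; // Contended for the longest so far.
  //       LowestPrecedence = P;
  //     }
  //   }
  //   if (Candidate) {
  //     Candidate->lock(); // Block on the longest-contended TSD.
  //     return Candidate;
  //   }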
55 | |
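  // Returns the TSD's local caches to the allocator, e.g. when the owning
  // thread exits.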
56 | void commitBack(Allocator *Instance) { Instance->commitBack(this); } |
57 | |
  // As noted in the comments on `getCache()`, the TSD doesn't always need to
  // be locked. In that case, the check is only bypassed on the paths that
  // lock all the TSDs.
61 | void assertLocked(bool BypassCheck) ASSERT_CAPABILITY(Mutex) { |
62 | if (SCUDO_DEBUG && !BypassCheck) |
63 | Mutex.assertHeld(); |
64 | } |
65 | |
  // Ideally, we would assert that all operations on Cache/QuarantineCache are
  // done with the `Mutex` acquired. However, the current architecture for
  // accessing a TSD doesn't cooperate well with the thread-safety analysis
  // because of pointer aliasing, so for now we only assert in the getters of
  // Cache/QuarantineCache.
  //
  // TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld`, but
  // acquiring a TSD doesn't always require holding the lock. Add this
  // assertion once the lock is always acquired.
75 | typename Allocator::CacheT &getCache() REQUIRES(Mutex) { return Cache; } |
76 | typename Allocator::QuarantineCacheT &getQuarantineCache() REQUIRES(Mutex) { |
77 | return QuarantineCache; |
78 | } |
79 | |
80 | private: |
81 | HybridMutex Mutex; |
82 | atomic_uptr Precedence = {}; |
83 | |
84 | typename Allocator::CacheT Cache GUARDED_BY(Mutex); |
85 | typename Allocator::QuarantineCacheT QuarantineCache GUARDED_BY(Mutex); |
86 | }; |
87 | |
88 | } // namespace scudo |
89 | |
90 | #endif // SCUDO_TSD_H_ |
91 | |