//===-- mutex.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_MUTEX_H_
#define SCUDO_MUTEX_H_

#include "atomic_helpers.h"
#include "common.h"
#include "thread_annotations.h"

#include <string.h>

#if SCUDO_FUCHSIA
#include <lib/sync/mutex.h> // for sync_mutex_t
#endif

namespace scudo {

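// A "hybrid" mutex: lock() first spins in userspace, retrying tryLock() up to
// NumberOfTries times with a short delay loop in between, and only falls back
// to the OS-assisted lockSlow() path (an atomic word on Linux, sync_mutex_t
// on Fuchsia) if the lock is still contended. This keeps uncontended and
// briefly contended acquisitions cheap, while still letting threads sleep
// under sustained contention.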
class CAPABILITY("mutex") HybridMutex {
public:
  bool tryLock() TRY_ACQUIRE(true);
  NOINLINE void lock() ACQUIRE() {
    if (LIKELY(tryLock()))
      return;
    // The compiler may try to fully unroll the loop, ending up in a
    // NumberOfTries * SpinTimes block of pauses mixed with tryLocks. This is
    // large, ugly and unneeded; a compact loop is better for our purpose
    // here. Use a pragma to tell the compiler not to unroll it.
#ifdef __clang__
#pragma nounroll
#endif
    for (u8 I = 0U; I < NumberOfTries; I++) {
      delayLoop();
      if (tryLock())
        return;
    }
    lockSlow();
  }
  void unlock() RELEASE();

  // TODO(chiahungduan): In general, we may want to assert the owner of the
  // lock as well. Given the current uses of HybridMutex, it's acceptable
  // without asserting the owner. Re-evaluate this when we have scenarios
  // that require a more fine-grained lock granularity.
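  // In debug builds, this verifies at runtime that the lock is held (though,
  // per the TODO above, not by whom); it also informs Clang's thread safety
  // analysis that the capability is held from this point on.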
  ALWAYS_INLINE void assertHeld() ASSERT_CAPABILITY(this) {
    if (SCUDO_DEBUG)
      assertHeldImpl();
  }

private:
  void delayLoop() {
    // The iteration count is derived from the average time spent accessing
    // the caches (among the fastest operations), so that we are unlikely to
    // wait too long for a fast operation to complete. V is volatile so the
    // compiler cannot optimize the loop away.
    constexpr u32 SpinTimes = 16;
    volatile u32 V = 0;
    for (u32 I = 0; I < SpinTimes; ++I) {
      u32 Tmp = V + 1;
      V = Tmp;
    }
  }

  void assertHeldImpl();

  // TODO(chiahungduan): Adapt this value based on scenarios, e.g., the
  // primary and secondary allocators have different allocation times.
  static constexpr u8 NumberOfTries = 32U;

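  // The platform-specific lock state: a bare atomic word on Linux, operated
  // on by tryLock() and lockSlow() (the out-of-line slow path is expected to
  // park waiters, e.g. via futex(2)), or the Fuchsia sync library's mutex.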
#if SCUDO_LINUX
  atomic_u32 M = {};
#elif SCUDO_FUCHSIA
  sync_mutex_t M = {};
#endif

  void lockSlow() ACQUIRE();
};

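// RAII helper that acquires the given HybridMutex for the duration of a
// scope. A minimal usage sketch (Counter and its annotated field are
// illustrative, not part of this header):
//
//   struct Counter {
//     HybridMutex Mutex;
//     u32 Value GUARDED_BY(Mutex) = 0;
//   };
//
//   void increment(Counter &C) {
//     ScopedLock L(C.Mutex); // Locks on construction...
//     C.Value++;
//   } // ...and unlocks when L goes out of scope.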
class SCOPED_CAPABILITY ScopedLock {
public:
  explicit ScopedLock(HybridMutex &M) ACQUIRE(M) : Mutex(M) { Mutex.lock(); }
  ~ScopedLock() RELEASE() { Mutex.unlock(); }

private:
  HybridMutex &Mutex;

  ScopedLock(const ScopedLock &) = delete;
  void operator=(const ScopedLock &) = delete;
};

} // namespace scudo

#endif // SCUDO_MUTEX_H_