//===-- sanitizer_mutex.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_mutex.h"

#include "sanitizer_common.h"

namespace __sanitizer {

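// Slow path for StaticSpinMutex::Lock. Spin with a cheap processor yield for
// the first 100 iterations, then fall back to yielding the thread. The relaxed
// load before the exchange avoids bouncing the cache line with atomic RMWs
// while the mutex is held by another thread (test-and-test-and-set).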
void StaticSpinMutex::LockSlow() {
  for (int i = 0;; i++) {
    if (i < 100)
      proc_yield(1);
    else
      internal_sched_yield();
    if (atomic_load(&state_, memory_order_relaxed) == 0 &&
        atomic_exchange(&state_, 1, memory_order_acquire) == 0)
      return;
  }
}

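// Futex-based semaphore: state_ is the available count. Wait() sleeps on the
// futex while the count is zero, then decrements it with a CAS loop (on CAS
// failure atomic_compare_exchange_weak refreshes `count` with the current
// value).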
void Semaphore::Wait() {
  u32 count = atomic_load(&state_, memory_order_relaxed);
  for (;;) {
    if (count == 0) {
      FutexWait(&state_, 0);
      count = atomic_load(&state_, memory_order_relaxed);
      continue;
    }
    if (atomic_compare_exchange_weak(&state_, &count, count - 1,
                                     memory_order_acquire))
      break;
  }
}

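// Makes `count` more slots available and wakes up to `count` waiters.
// The release increment synchronizes with the acquire CAS in Wait().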
void Semaphore::Post(u32 count) {
  CHECK_NE(count, 0);
  atomic_fetch_add(&state_, count, memory_order_release);
  FutexWake(&state_, count);
}

#if SANITIZER_CHECK_DEADLOCKS
// An empty mutex meta table; it effectively disables deadlock detection.
// Each tool can override the table to define its own mutex hierarchy and
// enable deadlock detection.
// The table defines a static mutex type hierarchy (what mutex types can be
// locked under what mutex types). The table is checked to be acyclic, and then
// actual mutex lock/unlock operations are checked to adhere to this hierarchy.
// The checking happens on mutex types rather than on individual mutex
// instances because doing it on mutex instances would significantly complicate
// the implementation, worsen performance and memory overhead, and is mostly
// unnecessary (we almost never lock multiple mutexes of the same type
// recursively).
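//
// As a sketch (the MutexTypeReport/MutexTypeSyncVar names below are
// hypothetical, not part of any tool; types must be numbered consecutively
// from MutexInvalid, see the CHECK_EQ in DebugMutexInit), a strong definition
// could look like:
//
//   MutexMeta mutex_meta[] = {
//       {MutexInvalid, "Invalid", {}},
//       // SyncVar mutexes can be acquired while a Report mutex is held.
//       {MutexTypeReport, "Report", {MutexTypeSyncVar}},
//       // Leaf: can be locked under anything, nothing locked under it.
//       {MutexTypeSyncVar, "SyncVar", {MutexLeaf}},
//   };
//   void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }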
static constexpr int kMutexTypeMax = 20;
SANITIZER_WEAK_ATTRIBUTE MutexMeta mutex_meta[kMutexTypeMax] = {};
SANITIZER_WEAK_ATTRIBUTE void PrintMutexPC(uptr pc) {}
static StaticSpinMutex mutex_meta_mtx;
static int mutex_type_count = -1;
// Adjacency matrix of what mutexes can be locked under what mutexes.
static bool mutex_can_lock[kMutexTypeMax][kMutexTypeMax];
// Mutex types with MutexMulti mark.
static bool mutex_multi[kMutexTypeMax];

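// Builds mutex_can_lock and mutex_multi from the weak mutex_meta table and
// checks that the declared hierarchy is acyclic.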
void DebugMutexInit() {
  // Build adjacency matrix.
  bool leaf[kMutexTypeMax];
  internal_memset(&leaf, 0, sizeof(leaf));
  int cnt[kMutexTypeMax];
  internal_memset(&cnt, 0, sizeof(cnt));
  for (int t = 0; t < kMutexTypeMax; t++) {
    mutex_type_count = t;
    if (!mutex_meta[t].name)
      break;
    CHECK_EQ(t, mutex_meta[t].type);
    for (uptr j = 0; j < ARRAY_SIZE(mutex_meta[t].can_lock); j++) {
      MutexType z = mutex_meta[t].can_lock[j];
      if (z == MutexInvalid)
        break;
      if (z == MutexLeaf) {
        CHECK(!leaf[t]);
        leaf[t] = true;
        continue;
      }
      if (z == MutexMulti) {
        mutex_multi[t] = true;
        continue;
      }
      CHECK_LT(z, kMutexTypeMax);
      CHECK(!mutex_can_lock[t][z]);
      mutex_can_lock[t][z] = true;
      cnt[t]++;
    }
  }
  // mutex_type_count == kMutexTypeMax indicates the array is not properly
  // terminated.
  CHECK_LT(mutex_type_count, kMutexTypeMax);
  // Add leaf mutexes.
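  // A leaf mutex can be locked under any other non-leaf mutex, but nothing can
  // be locked under a leaf, so a leaf type must not declare can_lock edges of
  // its own (checked via cnt[t] below).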
  for (int t = 0; t < mutex_type_count; t++) {
    if (!leaf[t])
      continue;
    CHECK_EQ(cnt[t], 0);
    for (int z = 0; z < mutex_type_count; z++) {
      if (z == MutexInvalid || t == z || leaf[z])
        continue;
      CHECK(!mutex_can_lock[z][t]);
      mutex_can_lock[z][t] = true;
    }
  }
  // Build the transitive closure and check that the graph is acyclic.
  u32 trans[kMutexTypeMax];
  static_assert(sizeof(trans[0]) * 8 >= kMutexTypeMax,
                "kMutexTypeMax does not fit into u32, switch to u64");
  internal_memset(&trans, 0, sizeof(trans));
  for (int i = 0; i < mutex_type_count; i++) {
    for (int j = 0; j < mutex_type_count; j++)
      if (mutex_can_lock[i][j])
        trans[i] |= 1 << j;
  }
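  // Warshall-style closure over the bit rows: if type i can (transitively)
  // lock k, then i can also lock everything k can lock.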
  for (int k = 0; k < mutex_type_count; k++) {
    for (int i = 0; i < mutex_type_count; i++) {
      if (trans[i] & (1 << k))
        trans[i] |= trans[k];
    }
  }
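  // A type that can transitively lock itself indicates a cycle.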
  for (int i = 0; i < mutex_type_count; i++) {
    if (trans[i] & (1 << i)) {
      Printf("Mutex %s participates in a cycle\n", mutex_meta[i].name);
      Die();
    }
  }
}

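// Per-thread lock-order checker: for each mutex type it tracks whether such
// a mutex is currently held (seq != 0), the order of acquisition (sequence
// stamps), the PC of the lock for reports, and the recursion depth.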
struct InternalDeadlockDetector {
  struct LockDesc {
    u64 seq;
    uptr pc;
    int recursion;
  };
  int initialized;
  u64 sequence;
  LockDesc locked[kMutexTypeMax];

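  // Verifies that acquiring a mutex of `type` is allowed under the most
  // recently acquired mutex type this thread still holds.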
  void Lock(MutexType type, uptr pc) {
    if (!Initialize(type))
      return;
    CHECK_LT(type, mutex_type_count);
    // Find the last locked mutex type.
    // This is the type we will use for hierarchy checks.
    u64 max_seq = 0;
    MutexType max_idx = MutexInvalid;
    for (int i = 0; i != mutex_type_count; i++) {
      if (locked[i].seq == 0)
        continue;
      CHECK_NE(locked[i].seq, max_seq);
      if (max_seq < locked[i].seq) {
        max_seq = locked[i].seq;
        max_idx = (MutexType)i;
      }
    }
    if (max_idx == type && mutex_multi[type]) {
      // Recursive lock of the same type.
      CHECK_EQ(locked[type].seq, max_seq);
      CHECK(locked[type].pc);
      locked[type].recursion++;
      return;
    }
    if (max_idx != MutexInvalid && !mutex_can_lock[max_idx][type]) {
      Printf("%s: internal deadlock: can't lock %s under %s mutex\n",
             SanitizerToolName, mutex_meta[type].name,
             mutex_meta[max_idx].name);
      PrintMutexPC(locked[max_idx].pc);
      CHECK(0);
    }
    locked[type].seq = ++sequence;
    locked[type].pc = pc;
    locked[type].recursion = 1;
  }

  void Unlock(MutexType type) {
    if (!Initialize(type))
      return;
    CHECK_LT(type, mutex_type_count);
    CHECK(locked[type].seq);
    CHECK_GT(locked[type].recursion, 0);
    if (--locked[type].recursion)
      return;
    locked[type].seq = 0;
    locked[type].pc = 0;
  }

  void CheckNoLocks() {
    for (int i = 0; i < mutex_type_count; i++) CHECK_EQ(locked[i].recursion, 0);
  }

  bool Initialize(MutexType type) {
    if (type == MutexUnchecked || type == MutexInvalid)
      return false;
    CHECK_GT(type, MutexInvalid);
    if (initialized != 0)
      return initialized > 0;
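    // Enter the "initialization in progress" state (-1) first: CHECKs fired
    // while DebugMutexInit() runs may lock checked mutexes and re-enter
    // Initialize() on this thread, and must bail out. If the tool defines no
    // mutex_meta table (mutex_type_count == 0), checking stays disabled.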
    initialized = -1;
    SpinMutexLock lock(&mutex_meta_mtx);
    if (mutex_type_count < 0)
      DebugMutexInit();
    initialized = mutex_type_count ? 1 : -1;
    return initialized > 0;
  }
};
// This variable is used by the __tls_get_addr interceptor, so cannot use the
// global-dynamic TLS model, as that would result in crashes.
__attribute__((tls_model("initial-exec"))) static THREADLOCAL
    InternalDeadlockDetector deadlock_detector;

void CheckedMutex::LockImpl(uptr pc) { deadlock_detector.Lock(type_, pc); }

void CheckedMutex::UnlockImpl() { deadlock_detector.Unlock(type_); }

void CheckedMutex::CheckNoLocksImpl() { deadlock_detector.CheckNoLocks(); }
#endif

}  // namespace __sanitizer
