//===-- sanitizer_mutex_test.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_pthread_wrappers.h"

#include "gtest/gtest.h"

#include <string.h>

namespace __sanitizer {

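// Shared test harness: an array of counters guarded by the mutex under test.
// Every operation checks that all elements hold the same value, so a broken
// mutex that lets two writers interleave shows up as a CHECK failure.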
template<typename MutexType>
class TestData {
 public:
  explicit TestData(MutexType *mtx)
      : mtx_(mtx) {
    for (int i = 0; i < kSize; i++)
      data_[i] = 0;
  }

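  // Increments every element under an exclusive lock. If the mutex fails to
  // provide exclusion, a concurrent writer leaves the elements out of sync.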
  void Write() {
    Lock l(mtx_);
    T v0 = data_[0];
    for (int i = 0; i < kSize; i++) {
      mtx_->CheckLocked();
      CHECK_EQ(data_[i], v0);
      data_[i]++;
    }
  }

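  // Same update as Write(), but via TryLock(); gives up without writing if
  // the mutex is currently contended.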
  void TryWrite() {
    if (!mtx_->TryLock())
      return;
    T v0 = data_[0];
    for (int i = 0; i < kSize; i++) {
      mtx_->CheckLocked();
      CHECK_EQ(data_[i], v0);
      data_[i]++;
    }
    mtx_->Unlock();
  }

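  // Verifies that all elements are equal while holding a read lock; multiple
  // readers may hold the lock concurrently.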
  void Read() {
    ReadLock l(mtx_);
    T v0 = data_[0];
    for (int i = 0; i < kSize; i++) {
      mtx_->CheckReadLocked();
      CHECK_EQ(data_[i], v0);
    }
  }

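  // Does some lock-free local work so that other threads get a chance to
  // acquire the mutex between iterations.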
  void Backoff() {
    volatile T data[kSize] = {};
    for (int i = 0; i < kSize; i++) {
      data[i]++;
      CHECK_EQ(data[i], 1);
    }
  }

 private:
  typedef GenericScopedLock<MutexType> Lock;
  typedef GenericScopedReadLock<MutexType> ReadLock;
  static const int kSize = 64;
  typedef u64 T;
  MutexType *mtx_;
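  // Padding keeps the mutex and the data on different cache lines, so that
  // contention on the lock does not also thrash the data it guards.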
  char pad_[kCacheLineSize];
  T data_[kSize];
};

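// Debug builds run fewer iterations to keep the test's runtime in check.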
const int kThreads = 8;
#if SANITIZER_DEBUG
const int kIters = 16*1024;
#else
const int kIters = 64*1024;
#endif

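// Thread bodies for the stress tests below: each repeatedly performs the
// guarded operation and then backs off, creating contention without
// serializing the whole run.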
template<typename MutexType>
static void *lock_thread(void *param) {
  TestData<MutexType> *data = (TestData<MutexType>*)param;
  for (int i = 0; i < kIters; i++) {
    data->Write();
    data->Backoff();
  }
  return 0;
}

template<typename MutexType>
static void *try_thread(void *param) {
  TestData<MutexType> *data = (TestData<MutexType>*)param;
  for (int i = 0; i < kIters; i++) {
    data->TryWrite();
    data->Backoff();
  }
  return 0;
}

template <typename MutexType>
static void *read_write_thread(void *param) {
  TestData<MutexType> *data = (TestData<MutexType> *)param;
  for (int i = 0; i < kIters; i++) {
    if ((i % 10) == 0)
      data->Write();
    else
      data->Read();
    data->Backoff();
  }
  return 0;
}

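// Helper: acquires the mutex through a scoped lock and verifies that
// CheckLocked() passes while the lock is held.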
template<typename MutexType>
static void check_locked(MutexType *mtx) {
  GenericScopedLock<MutexType> l(mtx);
  mtx->CheckLocked();
}

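// Stress tests: kThreads writers (or mixed readers and writers) hammer a
// single TestData instance through each mutex type.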
TEST(SanitizerCommon, SpinMutex) {
  SpinMutex mtx;
  mtx.Init();
  TestData<SpinMutex> data(&mtx);
  pthread_t threads[kThreads];
  for (int i = 0; i < kThreads; i++)
    PTHREAD_CREATE(&threads[i], 0, lock_thread<SpinMutex>, &data);
  for (int i = 0; i < kThreads; i++)
    PTHREAD_JOIN(threads[i], 0);
}

TEST(SanitizerCommon, SpinMutexTry) {
  SpinMutex mtx;
  mtx.Init();
  TestData<SpinMutex> data(&mtx);
  pthread_t threads[kThreads];
  for (int i = 0; i < kThreads; i++)
    PTHREAD_CREATE(&threads[i], 0, try_thread<SpinMutex>, &data);
  for (int i = 0; i < kThreads; i++)
    PTHREAD_JOIN(threads[i], 0);
}

TEST(SanitizerCommon, Mutex) {
  Mutex mtx;
  TestData<Mutex> data(&mtx);
  pthread_t threads[kThreads];
  for (int i = 0; i < kThreads; i++)
    PTHREAD_CREATE(&threads[i], 0, read_write_thread<Mutex>, &data);
  for (int i = 0; i < kThreads; i++) PTHREAD_JOIN(threads[i], 0);
}

TEST(SanitizerCommon, MutexTry) {
  Mutex mtx;
  TestData<Mutex> data(&mtx);
  pthread_t threads[kThreads];
  for (int i = 0; i < kThreads; i++)
    PTHREAD_CREATE(&threads[i], 0, try_thread<Mutex>, &data);
  for (int i = 0; i < kThreads; i++) PTHREAD_JOIN(threads[i], 0);
}

struct SemaphoreData {
  Semaphore *sem;
  bool done;
};

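// Blocks on the semaphore, then records completion; used below to check that
// Wait() does not return before a matching Post().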
void *SemaphoreThread(void *arg) {
  auto data = static_cast<SemaphoreData *>(arg);
  data->sem->Wait();
  data->done = true;
  return nullptr;
}

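// Checks semaphore counting (a Post(n) satisfies n Wait() calls) and
// blocking: the helper thread must not proceed until the main thread posts.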
TEST(SanitizerCommon, Semaphore) {
  Semaphore sem;
  sem.Post(1);
  sem.Wait();
  sem.Post(3);
  sem.Wait();
  sem.Wait();
  sem.Wait();

  SemaphoreData data = {&sem, false};
  pthread_t thread;
  PTHREAD_CREATE(&thread, nullptr, SemaphoreThread, &data);
  internal_sleep(1);
  CHECK(!data.done);
  sem.Post(1);
  PTHREAD_JOIN(thread, nullptr);
}

}  // namespace __sanitizer