//===-- tsan_trace_test.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_trace.h"

#include <pthread.h>

#include "gtest/gtest.h"
#include "tsan_rtl.h"

#if !defined(__x86_64__)
// These tests are currently crashing on ppc64:
// https://reviews.llvm.org/D110546#3025422
// due to the way we create thread contexts.
// There must be some difference in thread initialization
// between normal execution and unit tests.
# define TRACE_TEST(SUITE, NAME) TEST(SUITE, DISABLED_##NAME)
#else
# define TRACE_TEST(SUITE, NAME) TEST(SUITE, NAME)
#endif

namespace __tsan {

// We need to run all trace tests in a new thread,
// so that the thread trace is empty initially.
template <uptr N>
struct ThreadArray {
  ThreadArray() {
    for (auto *&thr : threads) {
      thr = static_cast<ThreadState *>(
          MmapOrDie(sizeof(ThreadState), "ThreadState"));
      Tid tid = ThreadCreate(cur_thread(), 0, 0, true);
      Processor *proc = ProcCreate();
      ProcWire(proc, thr);
      ThreadStart(thr, tid, 0, ThreadType::Fiber);
    }
  }

  ~ThreadArray() {
    for (uptr i = 0; i < N; i++) {
      if (threads[i])
        Finish(i);
    }
  }

  void Finish(uptr i) {
    auto *thr = threads[i];
    threads[i] = nullptr;
    Processor *proc = thr->proc();
    ThreadFinish(thr);
    ProcUnwire(proc, thr);
    ProcDestroy(proc);
    UnmapOrDie(thr, sizeof(ThreadState));
  }

  ThreadState *threads[N];
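  // Convenience accessors: tests use a ThreadArray<1> directly as its
  // single ThreadState*.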
  ThreadState *operator[](uptr i) { return threads[i]; }
  ThreadState *operator->() { return threads[0]; }
  operator ThreadState *() { return threads[0]; }
};

TRACE_TEST(Trace, RestoreAccess) {
  // A basic test with some function entry/exit events,
  // some mutex lock/unlock events and some other distracting
  // memory events.
  ThreadArray<1> thr;
  TraceFunc(thr, 0x1000);
  TraceFunc(thr, 0x1001);
  TraceMutexLock(thr, EventType::kLock, 0x4000, 0x5000, 0x6000);
  TraceMutexLock(thr, EventType::kLock, 0x4001, 0x5001, 0x6001);
  TraceMutexUnlock(thr, 0x5000);
  TraceFunc(thr);
  CHECK(TryTraceMemoryAccess(thr, 0x2001, 0x3001, 8, kAccessRead));
  TraceMutexLock(thr, EventType::kRLock, 0x4002, 0x5002, 0x6002);
  TraceFunc(thr, 0x1002);
  CHECK(TryTraceMemoryAccess(thr, 0x2000, 0x3000, 8, kAccessRead));
  // This is the access we want to find.
  // The previous one is equivalent, but RestoreStack must prefer
  // the last of the matching accesses.
  CHECK(TryTraceMemoryAccess(thr, 0x2002, 0x3000, 8, kAccessRead));
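  // RestoreStack expects to be called with the slot, thread registry,
  // and slot mutexes held.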
  Lock slot_lock(&ctx->slots[static_cast<uptr>(thr->fast_state.sid())].mtx);
  ThreadRegistryLock lock1(&ctx->thread_registry);
  Lock lock2(&ctx->slot_mtx);
  Tid tid = kInvalidTid;
  VarSizeStackTrace stk;
  MutexSet mset;
  uptr tag = kExternalTagNone;
  bool res = RestoreStack(EventType::kAccessExt, thr->fast_state.sid(),
                          thr->fast_state.epoch(), 0x3000, 8, kAccessRead, &tid,
                          &stk, &mset, &tag);
  CHECK(res);
  CHECK_EQ(tid, thr->tid);
  CHECK_EQ(stk.size, 3);
  CHECK_EQ(stk.trace[0], 0x1000);
  CHECK_EQ(stk.trace[1], 0x1002);
  CHECK_EQ(stk.trace[2], 0x2002);
  CHECK_EQ(mset.Size(), 2);
  CHECK_EQ(mset.Get(0).addr, 0x5001);
  CHECK_EQ(mset.Get(0).stack_id, 0x6001);
  CHECK_EQ(mset.Get(0).write, true);
  CHECK_EQ(mset.Get(1).addr, 0x5002);
  CHECK_EQ(mset.Get(1).stack_id, 0x6002);
  CHECK_EQ(mset.Get(1).write, false);
  CHECK_EQ(tag, kExternalTagNone);
}

TRACE_TEST(Trace, MemoryAccessSize) {
  // Test tracing and matching of accesses of different sizes.
  struct Params {
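    // access_size is the size of the traced access; offset and size
    // describe the access RestoreStack searches for; res is the expected
    // match result.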
    uptr access_size, offset, size;
    bool res;
  };
  Params tests[] = {
      {1, 0, 1, true},  {4, 0, 2, true},
      {4, 2, 2, true},  {8, 3, 1, true},
      {2, 1, 1, true},  {1, 1, 1, false},
      {8, 5, 4, false}, {4, static_cast<uptr>(-1l), 4, false},
  };
  for (auto params : tests) {
    for (int type = 0; type < 3; type++) {
      ThreadArray<1> thr;
      Printf("access_size=%zu, offset=%zu, size=%zu, res=%d, type=%d\n",
             params.access_size, params.offset, params.size, params.res, type);
      TraceFunc(thr, 0x1000);
      switch (type) {
        case 0:
          // This should emit a compressed event.
          CHECK(TryTraceMemoryAccess(thr, 0x2000, 0x3000, params.access_size,
                                     kAccessRead));
          break;
        case 1:
          // This should emit a full event.
          CHECK(TryTraceMemoryAccess(thr, 0x2000000, 0x3000, params.access_size,
                                     kAccessRead));
          break;
        case 2:
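          // This should emit a range access event.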
          TraceMemoryAccessRange(thr, 0x2000000, 0x3000, params.access_size,
                                 kAccessRead);
          break;
      }
      Lock slot_lock(&ctx->slots[static_cast<uptr>(thr->fast_state.sid())].mtx);
      ThreadRegistryLock lock1(&ctx->thread_registry);
      Lock lock2(&ctx->slot_mtx);
      Tid tid = kInvalidTid;
      VarSizeStackTrace stk;
      MutexSet mset;
      uptr tag = kExternalTagNone;
      bool res =
          RestoreStack(EventType::kAccessExt, thr->fast_state.sid(),
                       thr->fast_state.epoch(), 0x3000 + params.offset,
                       params.size, kAccessRead, &tid, &stk, &mset, &tag);
      CHECK_EQ(res, params.res);
      if (params.res) {
        CHECK_EQ(stk.size, 2);
        CHECK_EQ(stk.trace[0], 0x1000);
        CHECK_EQ(stk.trace[1], type ? 0x2000000 : 0x2000);
      }
    }
  }
}

TRACE_TEST(Trace, RestoreMutexLock) {
  // Check restoration of a mutex lock event.
  ThreadArray<1> thr;
  TraceFunc(thr, 0x1000);
  TraceMutexLock(thr, EventType::kLock, 0x4000, 0x5000, 0x6000);
  TraceMutexLock(thr, EventType::kRLock, 0x4001, 0x5001, 0x6001);
  TraceMutexLock(thr, EventType::kRLock, 0x4002, 0x5001, 0x6002);
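  // 0x5001 is read-locked twice: restoration is expected to report the PC
  // of the last lock event (0x4002) but keep the stack id of the first
  // one (0x6001).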
  Lock slot_lock(&ctx->slots[static_cast<uptr>(thr->fast_state.sid())].mtx);
  ThreadRegistryLock lock1(&ctx->thread_registry);
  Lock lock2(&ctx->slot_mtx);
  Tid tid = kInvalidTid;
  VarSizeStackTrace stk;
  MutexSet mset;
  uptr tag = kExternalTagNone;
  bool res = RestoreStack(EventType::kLock, thr->fast_state.sid(),
                          thr->fast_state.epoch(), 0x5001, 0, 0, &tid, &stk,
                          &mset, &tag);
  CHECK(res);
  CHECK_EQ(stk.size, 2);
  CHECK_EQ(stk.trace[0], 0x1000);
  CHECK_EQ(stk.trace[1], 0x4002);
  CHECK_EQ(mset.Size(), 2);
  CHECK_EQ(mset.Get(0).addr, 0x5000);
  CHECK_EQ(mset.Get(0).stack_id, 0x6000);
  CHECK_EQ(mset.Get(0).write, true);
  CHECK_EQ(mset.Get(1).addr, 0x5001);
  CHECK_EQ(mset.Get(1).stack_id, 0x6001);
  CHECK_EQ(mset.Get(1).write, false);
}

TRACE_TEST(Trace, MultiPart) {
  // Check replay of a trace with multiple parts.
  ThreadArray<1> thr;
  FuncEntry(thr, 0x1000);
  FuncEntry(thr, 0x2000);
  MutexPreLock(thr, 0x4000, 0x5000, 0);
  MutexPostLock(thr, 0x4000, 0x5000, 0);
  MutexPreLock(thr, 0x4000, 0x5000, 0);
  MutexPostLock(thr, 0x4000, 0x5000, 0);
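  // Fill several trace parts with distracting events so that replay has
  // to walk across part boundaries.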
  const uptr kEvents = 3 * sizeof(TracePart) / sizeof(Event);
  for (uptr i = 0; i < kEvents; i++) {
    FuncEntry(thr, 0x3000);
    MutexPreLock(thr, 0x4002, 0x5002, 0);
    MutexPostLock(thr, 0x4002, 0x5002, 0);
    MutexUnlock(thr, 0x4003, 0x5002, 0);
    FuncExit(thr);
  }
  FuncEntry(thr, 0x4000);
  TraceMutexLock(thr, EventType::kRLock, 0x4001, 0x5001, 0x6001);
  CHECK(TryTraceMemoryAccess(thr, 0x2002, 0x3000, 8, kAccessRead));
  Lock slot_lock(&ctx->slots[static_cast<uptr>(thr->fast_state.sid())].mtx);
  ThreadRegistryLock lock1(&ctx->thread_registry);
  Lock lock2(&ctx->slot_mtx);
  Tid tid = kInvalidTid;
  VarSizeStackTrace stk;
  MutexSet mset;
  uptr tag = kExternalTagNone;
  bool res = RestoreStack(EventType::kAccessExt, thr->fast_state.sid(),
                          thr->fast_state.epoch(), 0x3000, 8, kAccessRead, &tid,
                          &stk, &mset, &tag);
  CHECK(res);
  CHECK_EQ(tid, thr->tid);
  CHECK_EQ(stk.size, 4);
  CHECK_EQ(stk.trace[0], 0x1000);
  CHECK_EQ(stk.trace[1], 0x2000);
  CHECK_EQ(stk.trace[2], 0x4000);
  CHECK_EQ(stk.trace[3], 0x2002);
  CHECK_EQ(mset.Size(), 2);
  CHECK_EQ(mset.Get(0).addr, 0x5000);
  CHECK_EQ(mset.Get(0).write, true);
  CHECK_EQ(mset.Get(0).count, 2);
  CHECK_EQ(mset.Get(1).addr, 0x5001);
  CHECK_EQ(mset.Get(1).write, false);
  CHECK_EQ(mset.Get(1).count, 1);
}

TRACE_TEST(Trace, DeepSwitch) {
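  // Check that trace part switching works with a deep function call stack:
  // each iteration adds a stack frame and then fills a whole trace part
  // with lock/unlock events.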
  ThreadArray<1> thr;
  for (int i = 0; i < 2000; i++) {
    FuncEntry(thr, 0x1000);
    const uptr kEvents = sizeof(TracePart) / sizeof(Event);
    for (uptr i = 0; i < kEvents; i++) {
      TraceMutexLock(thr, EventType::kLock, 0x4000, 0x5000, 0x6000);
      TraceMutexUnlock(thr, 0x5000);
    }
  }
}

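// Checks the global trace part accounting in ctx against the expected values.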
void CheckTraceState(uptr count, uptr finished, uptr excess, uptr recycle) {
  Lock l(&ctx->slot_mtx);
  Printf("CheckTraceState(%zu/%zu, %zu/%zu, %zu/%zu, %zu/%zu)\n",
         ctx->trace_part_total_allocated, count,
         ctx->trace_part_recycle_finished, finished,
         ctx->trace_part_finished_excess, excess,
         ctx->trace_part_recycle.Size(), recycle);
  CHECK_EQ(ctx->trace_part_total_allocated, count);
  CHECK_EQ(ctx->trace_part_recycle_finished, finished);
  CHECK_EQ(ctx->trace_part_finished_excess, excess);
  CHECK_EQ(ctx->trace_part_recycle.Size(), recycle);
}

TRACE_TEST(TraceAlloc, SingleThread) {
  TraceResetForTesting();
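  // check_thread verifies both the per-thread trace part count and the
  // global accounting.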
  auto check_thread = [&](ThreadState *thr, uptr size, uptr count,
                          uptr finished, uptr excess, uptr recycle) {
    CHECK_EQ(thr->tctx->trace.parts.Size(), size);
    CheckTraceState(count, finished, excess, recycle);
  };
  ThreadArray<2> threads;
  check_thread(threads[0], 0, 0, 0, 0, 0);
  TraceSwitchPartImpl(threads[0]);
  check_thread(threads[0], 1, 1, 0, 0, 0);
  TraceSwitchPartImpl(threads[0]);
  check_thread(threads[0], 2, 2, 0, 0, 0);
  TraceSwitchPartImpl(threads[0]);
  check_thread(threads[0], 3, 3, 0, 0, 1);
  TraceSwitchPartImpl(threads[0]);
  check_thread(threads[0], 3, 3, 0, 0, 1);
  threads.Finish(0);
  CheckTraceState(3 /*count*/, 3 /*finished*/, 0 /*excess*/, 3 /*recycle*/);
  threads.Finish(1);
  CheckTraceState(3 /*count*/, 3 /*finished*/, 0 /*excess*/, 3 /*recycle*/);
}

TRACE_TEST(TraceAlloc, FinishedThreadReuse) {
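  // Check that trace parts of finished threads are recycled: once more than
  // kFinishedThreadHi finished threads hold parts, additional parts are
  // accounted as excess.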
  TraceResetForTesting();
  constexpr uptr Hi = Trace::kFinishedThreadHi;
  constexpr uptr kThreads = 4 * Hi;
  ThreadArray<kThreads> threads;
  for (uptr i = 0; i < kThreads; i++) {
    Printf("thread %zu\n", i);
    TraceSwitchPartImpl(threads[i]);
    if (i <= Hi)
      CheckTraceState(i + 1, i, 0, i);
    else if (i <= 2 * Hi)
      CheckTraceState(Hi + 1, Hi, i - Hi, Hi);
    else
      CheckTraceState(Hi + 1, Hi, Hi, Hi);
    threads.Finish(i);
    if (i < Hi)
      CheckTraceState(i + 1, i + 1, 0, i + 1);
    else if (i < 2 * Hi)
      CheckTraceState(Hi + 1, Hi + 1, i - Hi + 1, Hi + 1);
    else
      CheckTraceState(Hi + 1, Hi + 1, Hi + 1, Hi + 1);
  }
}

TRACE_TEST(TraceAlloc, FinishedThreadReuse2) {
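  // Check part accounting when each thread switches through more trace
  // parts than kMinParts before finishing.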
  TraceResetForTesting();
  // constexpr uptr Lo = Trace::kFinishedThreadLo;
  // constexpr uptr Hi = Trace::kFinishedThreadHi;
  constexpr uptr Min = Trace::kMinParts;
  constexpr uptr kThreads = 10;
  constexpr uptr kParts = 2 * Min;
  ThreadArray<kThreads> threads;
  for (uptr i = 0; i < kThreads; i++) {
    Printf("thread %zu\n", i);
    for (uptr j = 0; j < kParts; j++) TraceSwitchPartImpl(threads[i]);
    if (i == 0)
      CheckTraceState(Min, 0, 0, 1);
    else
      CheckTraceState(2 * Min, 0, Min, Min + 1);
    threads.Finish(i);
    if (i == 0)
      CheckTraceState(Min, Min, 0, Min);
    else
      CheckTraceState(2 * Min, 2 * Min, Min, 2 * Min);
  }
}

}  // namespace __tsan