//===-- tsan_trace.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_TRACE_H
#define TSAN_TRACE_H

#include "tsan_defs.h"
#include "tsan_ilist.h"
#include "tsan_mutexset.h"
#include "tsan_stack_trace.h"

namespace __tsan {

enum class EventType : u64 {
  kAccessExt,
  kAccessRange,
  kLock,
  kRLock,
  kUnlock,
  kTime,
};

// "Base" type for all events, used for type dispatch.
struct Event {
  // We use variable-length type encoding to give more bits to some event
  // types that need them. If is_access is set, this is EventAccess.
  // Otherwise, if is_func is set, this is EventFunc.
  // Otherwise, type denotes the event type.
  u64 is_access : 1;
  u64 is_func : 1;
  EventType type : 3;
  u64 _ : 59;
};
static_assert(sizeof(Event) == 8, "bad Event size");

// Nop event, used as padding; it does not affect state during replay.
static constexpr Event NopEvent = {1, 0, EventType::kAccessExt, 0};
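
// Readers decode an event with a three-level dispatch: is_access first, then
// is_func, then type. A minimal sketch of such a reader (the Replay* handlers
// are hypothetical, not part of the runtime):
//
//   void DispatchEvent(Event* ev) {
//     if (ev->is_access)
//       return ReplayAccess(reinterpret_cast<EventAccess*>(ev));
//     if (ev->is_func)
//       return ReplayFunc(reinterpret_cast<EventFunc*>(ev));
//     switch (ev->type) {
//       case EventType::kAccessExt:
//         return ReplayAccessExt(reinterpret_cast<EventAccessExt*>(ev));
//       case EventType::kAccessRange:
//         return ReplayAccessRange(reinterpret_cast<EventAccessRange*>(ev));
//       case EventType::kLock:
//       case EventType::kRLock:
//         return ReplayLock(reinterpret_cast<EventLock*>(ev));
//       case EventType::kUnlock:
//         return ReplayUnlock(reinterpret_cast<EventUnlock*>(ev));
//       case EventType::kTime:
//         return ReplayTime(reinterpret_cast<EventTime*>(ev));
//     }
//   }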

// A compressed memory access can represent an access only when its PC is
// close enough to the PC of the previous access; otherwise we fall back to
// EventAccessExt.
struct EventAccess {
  static constexpr uptr kPCBits = 15;
  static_assert(kPCBits + kCompressedAddrBits + 5 == 64,
                "unused bits in EventAccess");

  u64 is_access : 1;  // = 1
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 pc_delta : kPCBits;  // signed delta from the previous memory access PC
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventAccess) == 8, "bad EventAccess size");
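
// pc_delta stores a biased signed delta, so an access can be compressed only
// when its PC is within roughly ±16K of the previous access PC. A sketch of
// the writer-side check that decides between EventAccess and EventAccessExt
// (names are illustrative):
//
//   bool FitsCompressedPC(uptr pc, uptr prev_pc) {
//     uptr delta = pc - prev_pc + (1 << (EventAccess::kPCBits - 1));
//     return delta < (1 << EventAccess::kPCBits);
//   }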

// Function entry (pc != 0) or exit (pc == 0).
struct EventFunc {
  u64 is_access : 1;  // = 0
  u64 is_func : 1;    // = 1
  u64 pc : 62;
};
static_assert(sizeof(EventFunc) == 8, "bad EventFunc size");
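
// For example (illustrative values), entering a function at PC 0x1234 and
// later returning from it would be traced as:
//
//   constexpr EventFunc kEntry = {0, 1, 0x1234};  // entry: pc != 0
//   constexpr EventFunc kExit = {0, 1, 0};        // exit: pc == 0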

// Extended memory access with full PC.
struct EventAccessExt {
  // Note: precisely specifying the unused parts of the bitfield is critical
  // for performance. If we don't specify them, the compiler generates code
  // that loads the old value and shuffles it to extract the unused bits and
  // apply them to the new value. If we specify the unused part and store 0
  // in it, all of that unnecessary code goes away (the store of the 0
  // constant is combined with the other constant parts).
  static constexpr uptr kUnusedBits = 11;
  static_assert(kCompressedAddrBits + kUnusedBits + 9 == 64,
                "unused bits in EventAccessExt");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessExt
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
  u64 pc;
};
static_assert(sizeof(EventAccessExt) == 16, "bad EventAccessExt size");
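
// A sketch of a writer that follows the note above and zeroes the unused
// field explicitly (everything except the event fields is illustrative; the
// mask simply truncates the address to kCompressedAddrBits):
//
//   void WriteAccessExt(EventAccessExt* ev, bool is_read, uptr addr,
//                       uptr pc) {
//     ev->is_access = 0;
//     ev->is_func = 0;
//     ev->type = EventType::kAccessExt;
//     ev->is_read = is_read;
//     ev->is_atomic = 0;
//     ev->size_log = 0;  // a 1-byte access, for the sake of the example
//     ev->_ = 0;         // explicit store into the unused bits (see above)
//     ev->addr = addr & ((1ull << kCompressedAddrBits) - 1);
//     ev->pc = pc;
//   }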

// Access to a memory range.
struct EventAccessRange {
  static constexpr uptr kSizeLoBits = 13;
  static_assert(kCompressedAddrBits + kSizeLoBits + 7 == 64,
                "unused bits in EventAccessRange");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessRange
  u64 is_read : 1;
  u64 is_free : 1;
  u64 size_lo : kSizeLoBits;
  u64 pc : kCompressedAddrBits;
  u64 addr : kCompressedAddrBits;
  u64 size_hi : 64 - kCompressedAddrBits;
};
static_assert(sizeof(EventAccessRange) == 16, "bad EventAccessRange size");
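
// The range size is split across the two words; a sketch of how a reader
// reassembles it:
//
//   uptr RestoreSize(const EventAccessRange* ev) {
//     return (uptr(ev->size_hi) << EventAccessRange::kSizeLoBits) |
//            ev->size_lo;
//   }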

// Mutex lock.
struct EventLock {
  static constexpr uptr kStackIDLoBits = 15;
  static constexpr uptr kStackIDHiBits =
      sizeof(StackID) * kByteBits - kStackIDLoBits;
  static constexpr uptr kUnusedBits = 3;
  static_assert(kCompressedAddrBits + kStackIDLoBits + 5 == 64,
                "unused bits in EventLock");
  static_assert(kCompressedAddrBits + kStackIDHiBits + kUnusedBits == 64,
                "unused bits in EventLock");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kLock or EventType::kRLock
  u64 pc : kCompressedAddrBits;
  u64 stack_lo : kStackIDLoBits;
  u64 stack_hi : sizeof(StackID) * kByteBits - kStackIDLoBits;
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventLock) == 16, "bad EventLock size");
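
// The stack ID is likewise split into lo/hi halves; a sketch of reader-side
// reassembly:
//
//   StackID RestoreStackID(const EventLock* ev) {
//     return static_cast<StackID>(
//         (ev->stack_hi << EventLock::kStackIDLoBits) | ev->stack_lo);
//   }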

// Mutex unlock.
struct EventUnlock {
  static constexpr uptr kUnusedBits = 15;
  static_assert(kCompressedAddrBits + kUnusedBits + 5 == 64,
                "unused bits in EventUnlock");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kUnlock
  u64 _ : kUnusedBits;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventUnlock) == 8, "bad EventUnlock size");

// Time change event.
struct EventTime {
  static constexpr uptr kUnusedBits = 37;
  static_assert(kUnusedBits + sizeof(Sid) * kByteBits + kEpochBits + 5 == 64,
                "unused bits in EventTime");

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kTime
  u64 sid : sizeof(Sid) * kByteBits;
  u64 epoch : kEpochBits;
  u64 _ : kUnusedBits;
};
static_assert(sizeof(EventTime) == 8, "bad EventTime size");
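
// During replay, a time event switches the (sid, epoch) pair that subsequent
// events are attributed to; a minimal sketch:
//
//   void ReplayTime(const EventTime* ev, Sid* sid, Epoch* epoch) {
//     *sid = static_cast<Sid>(ev->sid);
//     *epoch = static_cast<Epoch>(ev->epoch);
//   }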

struct Trace;

struct TraceHeader {
  Trace* trace = nullptr;  // back-pointer to Trace containing this part
  INode trace_parts;       // in Trace::parts
  INode global;            // in Context::trace_part_recycle
};

struct TracePart : TraceHeader {
  // There are a lot of goroutines in Go, so we use smaller parts.
  static constexpr uptr kByteSize = (SANITIZER_GO ? 128 : 256) << 10;
  static constexpr uptr kSize =
      (kByteSize - sizeof(TraceHeader)) / sizeof(Event);
  // TraceAcquire does a fast event pointer overflow check by comparing the
  // pointer into TracePart::events against the kAlignment mask. Since
  // TraceParts are allocated page-aligned, this check detects the end of the
  // array (it also has false positives in the middle of the part, which are
  // filtered out separately). This also requires events to be the last field.
  static constexpr uptr kAlignment = 0xff0;
  Event events[kSize];

  TracePart() {}
};
static_assert(sizeof(TracePart) == TracePart::kByteSize, "bad TracePart size");
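
// A sketch of the fast end-of-part check described above (pos points at the
// next free slot in TracePart::events; hits at intermediate page boundaries
// inside a part are the false positives that get filtered on the slow path):
//
//   bool MaybePartEnd(Event* pos) {
//     return ((uptr)(pos + 1) & TracePart::kAlignment) == 0;
//   }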

struct Trace {
  Mutex mtx;
  IList<TraceHeader, &TraceHeader::trace_parts, TracePart> parts;
  // The first part that has not been queued into ctx->trace_part_recycle.
  TracePart* local_head;
  // Final position in the last part for finished threads.
  Event* final_pos = nullptr;
  // Number of trace parts allocated on behalf of this trace specifically.
  // The total number of parts in this trace can be larger if we retake some
  // parts from other traces.
  uptr parts_allocated = 0;

  Trace() : mtx(MutexTypeTrace) {}

  // We need at least 3 parts per thread, because we want to keep at least
  // 2 parts per thread that are not queued into ctx->trace_part_recycle
  // (the current one being filled and one full part that ensures that
  // we always have at least one part worth of previous memory accesses).
  static constexpr uptr kMinParts = 3;

  static constexpr uptr kFinishedThreadLo = 16;
  static constexpr uptr kFinishedThreadHi = 64;
};
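
// A sketch of walking every event of a trace under its mutex (assumes the
// Front()/Next()/Back() iteration provided by IList in tsan_ilist.h; Consume
// is a hypothetical callback; for a live thread the tail of the last part
// would have to be bounded by the thread's current trace position instead):
//
//   void ForEachEvent(Trace* trace, void (*Consume)(Event* ev)) {
//     Lock lock(&trace->mtx);
//     for (TracePart* part = trace->parts.Front(); part;
//          part = trace->parts.Next(part)) {
//       Event* end = &part->events[TracePart::kSize];
//       if (trace->final_pos && part == trace->parts.Back())
//         end = trace->final_pos;  // finished thread: stop at final position
//       for (Event* ev = &part->events[0]; ev < end; ev++) Consume(ev);
//     }
//   }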

}  // namespace __tsan

#endif  // TSAN_TRACE_H