/*
    Copyright (c) 2005-2021 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_detail__utils_H
#define __TBB_detail__utils_H

#include <type_traits>
#include <cstdint>
#include <atomic>

#include "_config.h"
#include "_assert.h"
#include "_machine.h"

// Standard headers for the C++20 helpers defined below, guarded the same way as the code that needs them.
#if __TBB_CPP20_CONCEPTS_PRESENT
#include <concepts> // std::convertible_to, std::same_as, std::constructible_from
#include <utility>  // std::forward, std::declval
#endif
#if __TBB_CPP20_COMPARISONS_PRESENT
#include <compare>  // std::weak_ordering, std::three_way_comparable_with
#endif

namespace tbb {
namespace detail {
inline namespace d0 {

//! Utility template function to prevent "unused" warnings by various compilers.
template<typename... T> void suppress_unused_warning(T&&...) {}

//! Compile-time constant that is an upper bound on cache line/sector size.
/** It should be used only in situations where having a compile-time upper
    bound is more useful than a run-time exact answer.
    @ingroup memory_allocation */
constexpr std::size_t max_nfs_size = 128;
constexpr std::size_t max_nfs_size_exp = 7;
static_assert((1 << max_nfs_size_exp) == max_nfs_size, "max_nfs_size_exp must be log2(max_nfs_size)");

//! Class that implements exponential backoff.
class atomic_backoff {
    //! Time delay, in units of "pause" instructions.
    /** Should be approximately the number of "pause" instructions that take
        the same time as a context switch. Must be a power of two. */
    static constexpr std::int32_t LOOPS_BEFORE_YIELD = 16;
    std::int32_t count;

public:
    // In many cases, an object of this type is initialized eagerly on a hot path,
    // as in for(atomic_backoff b; ; b.pause()) { /*loop body*/ }
    // For this reason, the construction cost must be very small!
    atomic_backoff() : count(1) {}
    // This constructor pauses immediately; do not use it on hot paths!
    atomic_backoff(bool) : count(1) { pause(); }

    //! No Copy
    atomic_backoff(const atomic_backoff&) = delete;
    atomic_backoff& operator=(const atomic_backoff&) = delete;

    //! Pause for a while.
    void pause() {
        if (count <= LOOPS_BEFORE_YIELD) {
            machine_pause(count);
            // Pause twice as long the next time.
            count *= 2;
        } else {
            // Pause is so long that we might as well yield CPU to scheduler.
            yield();
        }
    }

    //! Pause for a few times and return false if saturated.
    bool bounded_pause() {
        machine_pause(count);
        if (count < LOOPS_BEFORE_YIELD) {
            // Pause twice as long the next time.
            count *= 2;
            return true;
        } else {
            return false;
        }
    }

    void reset() {
        count = 1;
    }
};
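
// Illustrative usage sketch (not part of the interface): spin with exponential
// backoff until another thread sets a flag. The `flag` variable is a
// hypothetical example owned by the caller.
//
//   std::atomic<bool> flag{false};
//   for (atomic_backoff backoff; !flag.load(std::memory_order_acquire); backoff.pause()) {
//       // keep spinning; pause() escalates to yield() after LOOPS_BEFORE_YIELD
//   }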

//! Spin WHILE the condition is true.
/** C is a predicate invocable with a value of type T. */
template <typename T, typename C>
T spin_wait_while(const std::atomic<T>& location, C comp, std::memory_order order) {
    atomic_backoff backoff;
    T snapshot = location.load(order);
    while (comp(snapshot)) {
        backoff.pause();
        snapshot = location.load(order);
    }
    return snapshot;
}

//! Spin WHILE the value of the variable is equal to a given value
/** T and U should be comparable types. */
template <typename T, typename U>
T spin_wait_while_eq(const std::atomic<T>& location, const U value, std::memory_order order = std::memory_order_acquire) {
    return spin_wait_while(location, [&value](T t) { return t == value; }, order);
}

//! Spin UNTIL the value of the variable is equal to a given value
/** T and U should be comparable types. */
template<typename T, typename U>
T spin_wait_until_eq(const std::atomic<T>& location, const U value, std::memory_order order = std::memory_order_acquire) {
    return spin_wait_while(location, [&value](T t) { return t != value; }, order);
}
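
// Illustrative usage sketch: waiting for a worker thread to publish a state
// transition. The `phase` variable and its values are hypothetical examples.
//
//   std::atomic<int> phase{0};
//   // worker thread:  phase.store(2, std::memory_order_release);
//   // waiting thread: int observed = spin_wait_until_eq(phase, 2); // returns 2 once observed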

//! Spin UNTIL the condition returns true or the spinning time is up.
/** Returns what the passed functor returned last time it was invoked. */
template <typename Condition>
bool timed_spin_wait_until(Condition condition) {
    // 32 pauses + 32 yields were measured to be a balanced spin time before sleeping.
    bool finish = condition();
    for (int i = 1; !finish && i < 32; finish = condition(), i *= 2) {
        machine_pause(i);
    }
    for (int i = 32; !finish && i < 64; finish = condition(), ++i) {
        yield();
    }
    return finish;
}
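
// Illustrative usage sketch: bounded spinning before falling back to a heavier
// wait. `ready` and `block_on_condition_variable` are hypothetical names.
//
//   std::atomic<bool> ready{false};
//   if (!timed_spin_wait_until([&] { return ready.load(std::memory_order_acquire); })) {
//       block_on_condition_variable();  // spinning budget exhausted, sleep instead
//   }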

template <typename T>
std::uintptr_t log2(T in) {
    __TBB_ASSERT(in > 0, "The logarithm of a non-positive value is undefined.");
    return machine_log2(in);
}

template<typename T>
T reverse_bits(T src) {
    return machine_reverse_bits(src);
}

template<typename T>
T reverse_n_bits(T src, std::size_t n) {
    __TBB_ASSERT(n != 0, "Reverse for 0 bits is undefined behavior.");
    return reverse_bits(src) >> (number_of_bits<T>() - n);
}
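
// Worked example (assuming an unsigned argument type): reversing the 4 low-order
// bits of 0b1011 yields 0b1101, i.e. reverse_n_bits(0b1011u, 4) == 0b1101u.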

// A function to check if the passed integer is a power of two
template <typename IntegerType>
constexpr bool is_power_of_two( IntegerType arg ) {
    static_assert(std::is_integral<IntegerType>::value,
                  "An argument for is_power_of_two should be an integral type");
    return arg && (0 == (arg & (arg - 1)));
}

// A function to determine if the passed integer is a power of two
// at least as big as another power of two, i.e. for strictly positive i and j,
// with j being a power of two, determines whether i==j<<k for some nonnegative k
template <typename ArgIntegerType, typename DivisorIntegerType>
constexpr bool is_power_of_two_at_least(ArgIntegerType arg, DivisorIntegerType divisor) {
    // Divisor should be a power of two
    static_assert(std::is_integral<ArgIntegerType>::value,
                  "An argument for is_power_of_two_at_least should be an integral type");
    return 0 == (arg & (arg - divisor));
}
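
// Worked examples (arg strictly positive, divisor a power of two):
//   is_power_of_two_at_least(64, 16) == true   // 64 == 16 << 2
//   is_power_of_two_at_least(48, 16) == false  // 48 is not a power of two
//   is_power_of_two_at_least(8, 16)  == false  // 8 is a power of two, but smaller than 16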

// A function to compute arg modulo divisor where divisor is a power of 2.
template<typename ArgIntegerType, typename DivisorIntegerType>
inline ArgIntegerType modulo_power_of_two(ArgIntegerType arg, DivisorIntegerType divisor) {
    __TBB_ASSERT(is_power_of_two(divisor), "Divisor should be a power of two");
    return arg & (divisor - 1);
}
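
// Worked example: modulo_power_of_two(37, 8) == 5, since 0b100101 & 0b000111 == 0b101.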

//! A function to check if the passed pointer is aligned on a specific boundary
template<typename T>
constexpr bool is_aligned(T* pointer, std::uintptr_t alignment) {
    return 0 == ((std::uintptr_t)pointer & (alignment - 1));
}

#if TBB_USE_ASSERT
static void* const poisoned_ptr = reinterpret_cast<void*>(-1);

//! Set p to invalid pointer value.
template<typename T>
inline void poison_pointer( T* &p ) { p = reinterpret_cast<T*>(poisoned_ptr); }

template<typename T>
inline void poison_pointer(std::atomic<T*>& p) { p.store(reinterpret_cast<T*>(poisoned_ptr), std::memory_order_relaxed); }

/** Expected to be used in assertions only, thus no empty form is defined. **/
template<typename T>
inline bool is_poisoned( T* p ) { return p == reinterpret_cast<T*>(poisoned_ptr); }

template<typename T>
inline bool is_poisoned(const std::atomic<T*>& p) { return is_poisoned(p.load(std::memory_order_relaxed)); }
#else
template<typename T>
inline void poison_pointer(T&) { /*do nothing*/ }
#endif /* !TBB_USE_ASSERT */

template <std::size_t alignment = 0, typename T>
bool assert_pointer_valid(T* p, const char* comment = nullptr) {
    suppress_unused_warning(p, comment);
    __TBB_ASSERT(p != nullptr, comment);
    __TBB_ASSERT(!is_poisoned(p), comment);
#if !(_MSC_VER && _MSC_VER <= 1900 && !__INTEL_COMPILER)
    __TBB_ASSERT(is_aligned(p, alignment == 0 ? alignof(T) : alignment), comment);
#endif
    // Returns something to simplify assert_pointers_valid implementation.
    return true;
}

template <typename... Args>
void assert_pointers_valid(Args*... p) {
    // suppress_unused_warning is used as an evaluation context for the variadic pack.
    suppress_unused_warning(assert_pointer_valid(p)...);
}
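
// Illustrative usage sketch (the checks take effect in debug builds only;
// `buffer` and `node` are hypothetical pointers owned by the caller):
//
//   assert_pointer_valid(buffer, "buffer must be allocated and aligned");
//   assert_pointer_valid<max_nfs_size>(node);  // check an explicit alignment requirement
//   assert_pointers_valid(buffer, node);       // check several pointers at once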

//! Base class for types that should not be assigned.
class no_assign {
public:
    void operator=(const no_assign&) = delete;
    no_assign(const no_assign&) = default;
    no_assign() = default;
};

//! Base class for types that should not be copied or assigned.
class no_copy: no_assign {
public:
    no_copy(const no_copy&) = delete;
    no_copy() = default;
};

template <typename T>
void swap_atomics_relaxed(std::atomic<T>& lhs, std::atomic<T>& rhs) {
    T tmp = lhs.load(std::memory_order_relaxed);
    lhs.store(rhs.load(std::memory_order_relaxed), std::memory_order_relaxed);
    rhs.store(tmp, std::memory_order_relaxed);
}

//! One-time initialization states
enum class do_once_state {
    uninitialized = 0,      ///< No execution attempts have been undertaken yet
    pending,                ///< A thread is executing associated do-once routine
    executed,               ///< Do-once routine has been executed
    initialized = executed  ///< Convenience alias
};

//! One-time initialization function
/** \param initializer Callable without arguments.
        A variant that returns bool is used for cases when initialization can fail
        and it is OK to continue execution, but the state should be reset so that
        the initialization attempt is repeated the next time.
    \param state Shared state associated with initializer that specifies its
        initialization state. Must be initially set to the #uninitialized value
        (e.g. by means of default static zero initialization). **/
template <typename F>
void atomic_do_once( const F& initializer, std::atomic<do_once_state>& state ) {
    // The loop in the implementation is necessary to avoid race when thread T2
    // that arrived in the middle of initialization attempt by another thread T1
    // has just made initialization possible.
    // In such a case T2 has to rely on T1 to initialize, but T1 may already be past
    // the point where it can recognize the changed conditions.
    do_once_state expected_state;
    while ( state.load( std::memory_order_acquire ) != do_once_state::executed ) {
        if( state.load( std::memory_order_relaxed ) == do_once_state::uninitialized ) {
            expected_state = do_once_state::uninitialized;
#if defined(__INTEL_COMPILER) && __INTEL_COMPILER <= 1910
            using enum_type = typename std::underlying_type<do_once_state>::type;
            if( ((std::atomic<enum_type>&)state).compare_exchange_strong( (enum_type&)expected_state, (enum_type)do_once_state::pending ) ) {
#else
            if( state.compare_exchange_strong( expected_state, do_once_state::pending ) ) {
#endif
                run_initializer( initializer, state );
                break;
            }
        }
        spin_wait_while_eq( state, do_once_state::pending );
    }
}

// Run the initializer, which cannot fail
template<typename Functor>
void run_initializer(const Functor& f, std::atomic<do_once_state>& state ) {
    f();
    state.store(do_once_state::executed, std::memory_order_release);
}
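
// Illustrative usage sketch of one-time initialization (the state variable and
// the initializing lambda are hypothetical examples):
//
//   std::atomic<do_once_state> table_state{};  // zero-initialized => uninitialized
//   atomic_do_once([] { /* build a lookup table exactly once */ }, table_state);
//   // Every caller returns only after the initializer has run to completion.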

#if __TBB_CPP20_CONCEPTS_PRESENT
template <typename T>
concept boolean_testable_impl = std::convertible_to<T, bool>;

template <typename T>
concept boolean_testable = boolean_testable_impl<T> && requires( T&& t ) {
    { !std::forward<T>(t) } -> boolean_testable_impl;
};

#if __TBB_CPP20_COMPARISONS_PRESENT
struct synthesized_three_way_comparator {
    template <typename T1, typename T2>
    auto operator()( const T1& lhs, const T2& rhs ) const
        requires requires {
            { lhs < rhs } -> boolean_testable;
            { rhs < lhs } -> boolean_testable;
        }
    {
        if constexpr (std::three_way_comparable_with<T1, T2>) {
            return lhs <=> rhs;
        } else {
            if (lhs < rhs) {
                return std::weak_ordering::less;
            }
            if (rhs < lhs) {
                return std::weak_ordering::greater;
            }
            return std::weak_ordering::equivalent;
        }
    }
}; // struct synthesized_three_way_comparator

template <typename T1, typename T2 = T1>
using synthesized_three_way_result = decltype(synthesized_three_way_comparator{}(std::declval<T1&>(),
                                                                                  std::declval<T2&>()));
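
// Illustrative sketch: for a type that only provides operator<, the comparator
// synthesizes a std::weak_ordering. The `legacy_key` type is a hypothetical example.
//
//   struct legacy_key {
//       int v;
//       friend bool operator<(legacy_key a, legacy_key b) { return a.v < b.v; }
//   };
//   static_assert(std::is_same_v<synthesized_three_way_result<legacy_key>, std::weak_ordering>);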

#endif // __TBB_CPP20_COMPARISONS_PRESENT

// Check if the type T is implicitly OR explicitly convertible to U
template <typename T, typename U>
concept relaxed_convertible_to = std::constructible_from<U, T>;

template <typename T, typename U>
concept adaptive_same_as =
#if __TBB_STRICT_CONSTRAINTS
    std::same_as<T, U>;
#else
    std::convertible_to<T, U>;
#endif
#endif // __TBB_CPP20_CONCEPTS_PRESENT

} // namespace d0

namespace d1 {

class delegate_base {
public:
    virtual bool operator()() const = 0;
    virtual ~delegate_base() {}
};

template <typename FuncType>
class delegated_function : public delegate_base {
public:
    delegated_function(FuncType& f) : my_func(f) {}

    bool operator()() const override {
        return my_func();
    }

private:
    FuncType& my_func;
};
} // namespace d1

} // namespace detail
} // namespace tbb

#endif // __TBB_detail__utils_H