1 | // Necessary for implementing atomic methods for `AtomicUnit` |
#![allow(clippy::unit_arg)]
3 | |
4 | use crate::primitive::sync::atomic::{self, Ordering}; |
5 | use crate::CachePadded; |
6 | use core::cell::UnsafeCell; |
7 | use core::cmp; |
8 | use core::fmt; |
9 | use core::mem::{self, ManuallyDrop, MaybeUninit}; |
10 | use core::panic::{RefUnwindSafe, UnwindSafe}; |
11 | use core::ptr; |
12 | |
13 | use super::seq_lock::SeqLock; |
14 | |
15 | /// A thread-safe mutable memory location. |
16 | /// |
17 | /// This type is equivalent to [`Cell`], except it can also be shared among multiple threads. |
18 | /// |
19 | /// Operations on `AtomicCell`s use atomic instructions whenever possible, and synchronize using |
20 | /// global locks otherwise. You can call [`AtomicCell::<T>::is_lock_free()`] to check whether |
21 | /// atomic instructions or locks will be used. |
22 | /// |
23 | /// Atomic loads use the [`Acquire`] ordering and atomic stores use the [`Release`] ordering. |
24 | /// |
25 | /// [`Cell`]: std::cell::Cell |
26 | /// [`AtomicCell::<T>::is_lock_free()`]: AtomicCell::is_lock_free |
27 | /// [`Acquire`]: std::sync::atomic::Ordering::Acquire |
28 | /// [`Release`]: std::sync::atomic::Ordering::Release |
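///
/// # Examples
///
/// A minimal sketch of sharing a cell between threads (this example assumes
/// `std::thread::scope`, available since Rust 1.63, purely for illustration):
///
/// ```
/// use crossbeam_utils::atomic::AtomicCell;
///
/// let a = AtomicCell::new(0usize);
///
/// std::thread::scope(|s| {
///     s.spawn(|| a.fetch_add(1));
///     s.spawn(|| a.fetch_add(1));
/// });
///
/// assert_eq!(a.load(), 2);
/// ```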
#[repr(transparent)]
30 | pub struct AtomicCell<T> { |
31 | /// The inner value. |
32 | /// |
33 | /// If this value can be transmuted into a primitive atomic type, it will be treated as such. |
34 | /// Otherwise, all potentially concurrent operations on this data will be protected by a global |
35 | /// lock. |
36 | /// |
/// Using `MaybeUninit` to prevent code outside the cell from observing partially initialized state:
38 | /// <https://github.com/crossbeam-rs/crossbeam/issues/833> |
39 | /// (This rustc bug has been fixed in Rust 1.64.) |
40 | /// |
41 | /// Note: |
42 | /// - we'll never store uninitialized `T` due to our API only using initialized `T`. |
43 | /// - this `MaybeUninit` does *not* fix <https://github.com/crossbeam-rs/crossbeam/issues/315>. |
44 | value: UnsafeCell<MaybeUninit<T>>, |
45 | } |
46 | |
47 | unsafe impl<T: Send> Send for AtomicCell<T> {} |
48 | unsafe impl<T: Send> Sync for AtomicCell<T> {} |
49 | |
50 | impl<T> UnwindSafe for AtomicCell<T> {} |
51 | impl<T> RefUnwindSafe for AtomicCell<T> {} |
52 | |
53 | impl<T> AtomicCell<T> { |
54 | /// Creates a new atomic cell initialized with `val`. |
55 | /// |
56 | /// # Examples |
57 | /// |
58 | /// ``` |
59 | /// use crossbeam_utils::atomic::AtomicCell; |
60 | /// |
61 | /// let a = AtomicCell::new(7); |
62 | /// ``` |
63 | pub const fn new(val: T) -> AtomicCell<T> { |
64 | AtomicCell { |
65 | value: UnsafeCell::new(MaybeUninit::new(val)), |
66 | } |
67 | } |
68 | |
69 | /// Consumes the atomic and returns the contained value. |
70 | /// |
71 | /// This is safe because passing `self` by value guarantees that no other threads are |
72 | /// concurrently accessing the atomic data. |
73 | /// |
74 | /// # Examples |
75 | /// |
76 | /// ``` |
77 | /// use crossbeam_utils::atomic::AtomicCell; |
78 | /// |
79 | /// let a = AtomicCell::new(7); |
80 | /// let v = a.into_inner(); |
81 | /// |
82 | /// assert_eq!(v, 7); |
83 | /// ``` |
84 | pub fn into_inner(self) -> T { |
85 | let this = ManuallyDrop::new(self); |
86 | // SAFETY: |
87 | // - passing `self` by value guarantees that no other threads are concurrently |
88 | // accessing the atomic data |
89 | // - the raw pointer passed in is valid because we got it from an owned value. |
90 | // - `ManuallyDrop` prevents double dropping `T` |
91 | unsafe { this.as_ptr().read() } |
92 | } |
93 | |
94 | /// Returns `true` if operations on values of this type are lock-free. |
95 | /// |
96 | /// If the compiler or the platform doesn't support the necessary atomic instructions, |
97 | /// `AtomicCell<T>` will use global locks for every potentially concurrent atomic operation. |
98 | /// |
99 | /// # Examples |
100 | /// |
101 | /// ``` |
102 | /// use crossbeam_utils::atomic::AtomicCell; |
103 | /// |
104 | /// // This type is internally represented as `AtomicUsize` so we can just use atomic |
105 | /// // operations provided by it. |
106 | /// assert_eq!(AtomicCell::<usize>::is_lock_free(), true); |
107 | /// |
108 | /// // A wrapper struct around `isize`. |
109 | /// struct Foo { |
110 | /// bar: isize, |
111 | /// } |
112 | /// // `AtomicCell<Foo>` will be internally represented as `AtomicIsize`. |
113 | /// assert_eq!(AtomicCell::<Foo>::is_lock_free(), true); |
114 | /// |
115 | /// // Operations on zero-sized types are always lock-free. |
116 | /// assert_eq!(AtomicCell::<()>::is_lock_free(), true); |
117 | /// |
118 | /// // Very large types cannot be represented as any of the standard atomic types, so atomic |
119 | /// // operations on them will have to use global locks for synchronization. |
120 | /// assert_eq!(AtomicCell::<[u8; 1000]>::is_lock_free(), false); |
121 | /// ``` |
122 | pub const fn is_lock_free() -> bool { |
123 | atomic_is_lock_free::<T>() |
124 | } |
125 | |
126 | /// Stores `val` into the atomic cell. |
127 | /// |
128 | /// # Examples |
129 | /// |
130 | /// ``` |
131 | /// use crossbeam_utils::atomic::AtomicCell; |
132 | /// |
133 | /// let a = AtomicCell::new(7); |
134 | /// |
135 | /// assert_eq!(a.load(), 7); |
136 | /// a.store(8); |
137 | /// assert_eq!(a.load(), 8); |
138 | /// ``` |
139 | pub fn store(&self, val: T) { |
140 | if mem::needs_drop::<T>() { |
141 | drop(self.swap(val)); |
142 | } else { |
143 | unsafe { |
144 | atomic_store(self.as_ptr(), val); |
145 | } |
146 | } |
147 | } |
148 | |
149 | /// Stores `val` into the atomic cell and returns the previous value. |
150 | /// |
151 | /// # Examples |
152 | /// |
153 | /// ``` |
154 | /// use crossbeam_utils::atomic::AtomicCell; |
155 | /// |
156 | /// let a = AtomicCell::new(7); |
157 | /// |
158 | /// assert_eq!(a.load(), 7); |
159 | /// assert_eq!(a.swap(8), 7); |
160 | /// assert_eq!(a.load(), 8); |
161 | /// ``` |
162 | pub fn swap(&self, val: T) -> T { |
163 | unsafe { atomic_swap(self.as_ptr(), val) } |
164 | } |
165 | |
166 | /// Returns a raw pointer to the underlying data in this atomic cell. |
167 | /// |
168 | /// # Examples |
169 | /// |
170 | /// ``` |
171 | /// use crossbeam_utils::atomic::AtomicCell; |
172 | /// |
173 | /// let a = AtomicCell::new(5); |
174 | /// |
175 | /// let ptr = a.as_ptr(); |
176 | /// ``` |
#[inline]
178 | pub fn as_ptr(&self) -> *mut T { |
179 | self.value.get().cast::<T>() |
180 | } |
181 | } |
182 | |
183 | impl<T: Default> AtomicCell<T> { |
184 | /// Takes the value of the atomic cell, leaving `Default::default()` in its place. |
185 | /// |
186 | /// # Examples |
187 | /// |
188 | /// ``` |
189 | /// use crossbeam_utils::atomic::AtomicCell; |
190 | /// |
191 | /// let a = AtomicCell::new(5); |
192 | /// let five = a.take(); |
193 | /// |
194 | /// assert_eq!(five, 5); |
195 | /// assert_eq!(a.into_inner(), 0); |
196 | /// ``` |
197 | pub fn take(&self) -> T { |
self.swap(Default::default())
199 | } |
200 | } |
201 | |
202 | impl<T: Copy> AtomicCell<T> { |
203 | /// Loads a value from the atomic cell. |
204 | /// |
205 | /// # Examples |
206 | /// |
207 | /// ``` |
208 | /// use crossbeam_utils::atomic::AtomicCell; |
209 | /// |
210 | /// let a = AtomicCell::new(7); |
211 | /// |
212 | /// assert_eq!(a.load(), 7); |
213 | /// ``` |
214 | pub fn load(&self) -> T { |
215 | unsafe { atomic_load(self.as_ptr()) } |
216 | } |
217 | } |
218 | |
219 | impl<T: Copy + Eq> AtomicCell<T> { |
220 | /// If the current value equals `current`, stores `new` into the atomic cell. |
221 | /// |
222 | /// The return value is always the previous value. If it is equal to `current`, then the value |
223 | /// was updated. |
224 | /// |
225 | /// # Examples |
226 | /// |
227 | /// ``` |
/// # #![allow(deprecated)]
229 | /// use crossbeam_utils::atomic::AtomicCell; |
230 | /// |
231 | /// let a = AtomicCell::new(1); |
232 | /// |
233 | /// assert_eq!(a.compare_and_swap(2, 3), 1); |
234 | /// assert_eq!(a.load(), 1); |
235 | /// |
236 | /// assert_eq!(a.compare_and_swap(1, 2), 1); |
237 | /// assert_eq!(a.load(), 2); |
238 | /// ``` |
239 | // TODO: remove in the next major version. |
#[deprecated(note = "Use `compare_exchange` instead")]
241 | pub fn compare_and_swap(&self, current: T, new: T) -> T { |
242 | match self.compare_exchange(current, new) { |
243 | Ok(v) => v, |
244 | Err(v) => v, |
245 | } |
246 | } |
247 | |
248 | /// If the current value equals `current`, stores `new` into the atomic cell. |
249 | /// |
250 | /// The return value is a result indicating whether the new value was written and containing |
251 | /// the previous value. On success this value is guaranteed to be equal to `current`. |
252 | /// |
253 | /// # Examples |
254 | /// |
255 | /// ``` |
256 | /// use crossbeam_utils::atomic::AtomicCell; |
257 | /// |
258 | /// let a = AtomicCell::new(1); |
259 | /// |
260 | /// assert_eq!(a.compare_exchange(2, 3), Err(1)); |
261 | /// assert_eq!(a.load(), 1); |
262 | /// |
263 | /// assert_eq!(a.compare_exchange(1, 2), Ok(1)); |
264 | /// assert_eq!(a.load(), 2); |
265 | /// ``` |
266 | pub fn compare_exchange(&self, current: T, new: T) -> Result<T, T> { |
267 | unsafe { atomic_compare_exchange_weak(self.as_ptr(), current, new) } |
268 | } |
269 | |
270 | /// Fetches the value, and applies a function to it that returns an optional |
271 | /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else |
272 | /// `Err(previous_value)`. |
273 | /// |
274 | /// Note: This may call the function multiple times if the value has been changed from other threads in |
275 | /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied |
276 | /// only once to the stored value. |
277 | /// |
278 | /// # Examples |
279 | /// |
280 | /// ```rust |
281 | /// use crossbeam_utils::atomic::AtomicCell; |
282 | /// |
283 | /// let a = AtomicCell::new(7); |
284 | /// assert_eq!(a.fetch_update(|_| None), Err(7)); |
285 | /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(7)); |
286 | /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(8)); |
287 | /// assert_eq!(a.load(), 9); |
288 | /// ``` |
#[inline]
290 | pub fn fetch_update<F>(&self, mut f: F) -> Result<T, T> |
291 | where |
292 | F: FnMut(T) -> Option<T>, |
293 | { |
294 | let mut prev = self.load(); |
295 | while let Some(next) = f(prev) { |
296 | match self.compare_exchange(prev, next) { |
297 | x @ Ok(_) => return x, |
298 | Err(next_prev) => prev = next_prev, |
299 | } |
300 | } |
301 | Err(prev) |
302 | } |
303 | } |
304 | |
305 | // `MaybeUninit` prevents `T` from being dropped, so we need to implement `Drop` |
306 | // for `AtomicCell` to avoid leaks of non-`Copy` types. |
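// For example, without this impl, dropping `AtomicCell::new(String::from("x"))` would leak the
// heap allocation owned by the `String`.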
307 | impl<T> Drop for AtomicCell<T> { |
308 | fn drop(&mut self) { |
309 | if mem::needs_drop::<T>() { |
310 | // SAFETY: |
311 | // - the mutable reference guarantees that no other threads are concurrently accessing the atomic data |
312 | // - the raw pointer passed in is valid because we got it from a reference |
313 | // - `MaybeUninit` prevents double dropping `T` |
314 | unsafe { |
315 | self.as_ptr().drop_in_place(); |
316 | } |
317 | } |
318 | } |
319 | } |
320 | |
321 | macro_rules! atomic { |
// If values of type `$t` can be transmuted into values of the primitive atomic type `$atomic`,
// declares a variable `$a` of type `&$atomic` and executes `$atomic_op`, breaking out of the loop.
324 | (@check, $t:ty, $atomic:ty, $a:ident, $atomic_op:expr) => { |
325 | if can_transmute::<$t, $atomic>() { |
326 | let $a: &$atomic; |
327 | break $atomic_op; |
328 | } |
329 | }; |
330 | |
331 | // If values of type `$t` can be transmuted into values of a primitive atomic type, declares |
332 | // variable `$a` of that type and executes `$atomic_op`. Otherwise, just executes |
333 | // `$fallback_op`. |
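//
// Illustrative sketch of the dispatch: for a call like `atomic!(u32, a, atomic_op, fallback_op)`,
// the `@check` arms are tried in order until `u32` matches `atomic::AtomicU32`; `a` is then
// declared as `&atomic::AtomicU32`, `atomic_op` runs (it is expected to assign `a` before using
// it), and the loop breaks. A type that matches no primitive atomic falls through to
// `fallback_op`.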
334 | ($t:ty, $a:ident, $atomic_op:expr, $fallback_op:expr) => { |
335 | loop { |
336 | atomic!(@check, $t, AtomicUnit, $a, $atomic_op); |
337 | |
338 | atomic!(@check, $t, atomic::AtomicU8, $a, $atomic_op); |
339 | atomic!(@check, $t, atomic::AtomicU16, $a, $atomic_op); |
340 | atomic!(@check, $t, atomic::AtomicU32, $a, $atomic_op); |
#[cfg(target_has_atomic = "64")]
342 | atomic!(@check, $t, atomic::AtomicU64, $a, $atomic_op); |
343 | // TODO: AtomicU128 is unstable |
344 | // atomic!(@check, $t, atomic::AtomicU128, $a, $atomic_op); |
345 | |
346 | break $fallback_op; |
347 | } |
348 | }; |
349 | } |
350 | |
351 | macro_rules! impl_arithmetic { |
352 | ($t:ty, fallback, $example:tt) => { |
353 | impl AtomicCell<$t> { |
354 | /// Increments the current value by `val` and returns the previous value. |
355 | /// |
356 | /// The addition wraps on overflow. |
357 | /// |
358 | /// # Examples |
359 | /// |
360 | /// ``` |
361 | /// use crossbeam_utils::atomic::AtomicCell; |
362 | /// |
363 | #[doc = $example] |
364 | /// |
365 | /// assert_eq!(a.fetch_add(3), 7); |
366 | /// assert_eq!(a.load(), 10); |
367 | /// ``` |
368 | #[inline] |
369 | pub fn fetch_add(&self, val: $t) -> $t { |
370 | let _guard = lock(self.as_ptr() as usize).write(); |
371 | let value = unsafe { &mut *(self.as_ptr()) }; |
372 | let old = *value; |
373 | *value = value.wrapping_add(val); |
374 | old |
375 | } |
376 | |
377 | /// Decrements the current value by `val` and returns the previous value. |
378 | /// |
379 | /// The subtraction wraps on overflow. |
380 | /// |
381 | /// # Examples |
382 | /// |
383 | /// ``` |
384 | /// use crossbeam_utils::atomic::AtomicCell; |
385 | /// |
386 | #[doc = $example] |
387 | /// |
388 | /// assert_eq!(a.fetch_sub(3), 7); |
389 | /// assert_eq!(a.load(), 4); |
390 | /// ``` |
391 | #[inline] |
392 | pub fn fetch_sub(&self, val: $t) -> $t { |
393 | let _guard = lock(self.as_ptr() as usize).write(); |
394 | let value = unsafe { &mut *(self.as_ptr()) }; |
395 | let old = *value; |
396 | *value = value.wrapping_sub(val); |
397 | old |
398 | } |
399 | |
400 | /// Applies bitwise "and" to the current value and returns the previous value. |
401 | /// |
402 | /// # Examples |
403 | /// |
404 | /// ``` |
405 | /// use crossbeam_utils::atomic::AtomicCell; |
406 | /// |
407 | #[doc = $example] |
408 | /// |
409 | /// assert_eq!(a.fetch_and(3), 7); |
410 | /// assert_eq!(a.load(), 3); |
411 | /// ``` |
412 | #[inline] |
413 | pub fn fetch_and(&self, val: $t) -> $t { |
414 | let _guard = lock(self.as_ptr() as usize).write(); |
415 | let value = unsafe { &mut *(self.as_ptr()) }; |
416 | let old = *value; |
417 | *value &= val; |
418 | old |
419 | } |
420 | |
421 | /// Applies bitwise "nand" to the current value and returns the previous value. |
422 | /// |
423 | /// # Examples |
424 | /// |
425 | /// ``` |
426 | /// use crossbeam_utils::atomic::AtomicCell; |
427 | /// |
428 | #[doc = $example] |
429 | /// |
430 | /// assert_eq!(a.fetch_nand(3), 7); |
431 | /// assert_eq!(a.load(), !(7 & 3)); |
432 | /// ``` |
433 | #[inline] |
434 | pub fn fetch_nand(&self, val: $t) -> $t { |
435 | let _guard = lock(self.as_ptr() as usize).write(); |
436 | let value = unsafe { &mut *(self.as_ptr()) }; |
437 | let old = *value; |
438 | *value = !(old & val); |
439 | old |
440 | } |
441 | |
442 | /// Applies bitwise "or" to the current value and returns the previous value. |
443 | /// |
444 | /// # Examples |
445 | /// |
446 | /// ``` |
447 | /// use crossbeam_utils::atomic::AtomicCell; |
448 | /// |
449 | #[doc = $example] |
450 | /// |
451 | /// assert_eq!(a.fetch_or(16), 7); |
452 | /// assert_eq!(a.load(), 23); |
453 | /// ``` |
454 | #[inline] |
455 | pub fn fetch_or(&self, val: $t) -> $t { |
456 | let _guard = lock(self.as_ptr() as usize).write(); |
457 | let value = unsafe { &mut *(self.as_ptr()) }; |
458 | let old = *value; |
459 | *value |= val; |
460 | old |
461 | } |
462 | |
463 | /// Applies bitwise "xor" to the current value and returns the previous value. |
464 | /// |
465 | /// # Examples |
466 | /// |
467 | /// ``` |
468 | /// use crossbeam_utils::atomic::AtomicCell; |
469 | /// |
470 | #[doc = $example] |
471 | /// |
472 | /// assert_eq!(a.fetch_xor(2), 7); |
473 | /// assert_eq!(a.load(), 5); |
474 | /// ``` |
475 | #[inline] |
476 | pub fn fetch_xor(&self, val: $t) -> $t { |
477 | let _guard = lock(self.as_ptr() as usize).write(); |
478 | let value = unsafe { &mut *(self.as_ptr()) }; |
479 | let old = *value; |
480 | *value ^= val; |
481 | old |
482 | } |
483 | |
484 | /// Compares and sets the maximum of the current value and `val`, |
485 | /// and returns the previous value. |
486 | /// |
487 | /// # Examples |
488 | /// |
489 | /// ``` |
490 | /// use crossbeam_utils::atomic::AtomicCell; |
491 | /// |
492 | #[doc = $example] |
493 | /// |
494 | /// assert_eq!(a.fetch_max(2), 7); |
495 | /// assert_eq!(a.load(), 7); |
496 | /// ``` |
497 | #[inline] |
498 | pub fn fetch_max(&self, val: $t) -> $t { |
499 | let _guard = lock(self.as_ptr() as usize).write(); |
500 | let value = unsafe { &mut *(self.as_ptr()) }; |
501 | let old = *value; |
502 | *value = cmp::max(old, val); |
503 | old |
504 | } |
505 | |
506 | /// Compares and sets the minimum of the current value and `val`, |
507 | /// and returns the previous value. |
508 | /// |
509 | /// # Examples |
510 | /// |
511 | /// ``` |
512 | /// use crossbeam_utils::atomic::AtomicCell; |
513 | /// |
514 | #[doc = $example] |
515 | /// |
516 | /// assert_eq!(a.fetch_min(2), 7); |
517 | /// assert_eq!(a.load(), 2); |
518 | /// ``` |
519 | #[inline] |
520 | pub fn fetch_min(&self, val: $t) -> $t { |
521 | let _guard = lock(self.as_ptr() as usize).write(); |
522 | let value = unsafe { &mut *(self.as_ptr()) }; |
523 | let old = *value; |
524 | *value = cmp::min(old, val); |
525 | old |
526 | } |
527 | } |
528 | }; |
529 | ($t:ty, $atomic:ident, $example:tt) => { |
530 | impl AtomicCell<$t> { |
531 | /// Increments the current value by `val` and returns the previous value. |
532 | /// |
533 | /// The addition wraps on overflow. |
534 | /// |
535 | /// # Examples |
536 | /// |
537 | /// ``` |
538 | /// use crossbeam_utils::atomic::AtomicCell; |
539 | /// |
540 | #[doc = $example] |
541 | /// |
542 | /// assert_eq!(a.fetch_add(3), 7); |
543 | /// assert_eq!(a.load(), 10); |
544 | /// ``` |
545 | #[inline] |
546 | pub fn fetch_add(&self, val: $t) -> $t { |
547 | atomic! { |
548 | $t, _a, |
549 | { |
550 | let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; |
551 | a.fetch_add(val, Ordering::AcqRel) |
552 | }, |
553 | { |
554 | let _guard = lock(self.as_ptr() as usize).write(); |
555 | let value = unsafe { &mut *(self.as_ptr()) }; |
556 | let old = *value; |
557 | *value = value.wrapping_add(val); |
558 | old |
559 | } |
560 | } |
561 | } |
562 | |
563 | /// Decrements the current value by `val` and returns the previous value. |
564 | /// |
565 | /// The subtraction wraps on overflow. |
566 | /// |
567 | /// # Examples |
568 | /// |
569 | /// ``` |
570 | /// use crossbeam_utils::atomic::AtomicCell; |
571 | /// |
572 | #[doc = $example] |
573 | /// |
574 | /// assert_eq!(a.fetch_sub(3), 7); |
575 | /// assert_eq!(a.load(), 4); |
576 | /// ``` |
577 | #[inline] |
578 | pub fn fetch_sub(&self, val: $t) -> $t { |
579 | atomic! { |
580 | $t, _a, |
581 | { |
582 | let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; |
583 | a.fetch_sub(val, Ordering::AcqRel) |
584 | }, |
585 | { |
586 | let _guard = lock(self.as_ptr() as usize).write(); |
587 | let value = unsafe { &mut *(self.as_ptr()) }; |
588 | let old = *value; |
589 | *value = value.wrapping_sub(val); |
590 | old |
591 | } |
592 | } |
593 | } |
594 | |
595 | /// Applies bitwise "and" to the current value and returns the previous value. |
596 | /// |
597 | /// # Examples |
598 | /// |
599 | /// ``` |
600 | /// use crossbeam_utils::atomic::AtomicCell; |
601 | /// |
602 | #[doc = $example] |
603 | /// |
604 | /// assert_eq!(a.fetch_and(3), 7); |
605 | /// assert_eq!(a.load(), 3); |
606 | /// ``` |
607 | #[inline] |
608 | pub fn fetch_and(&self, val: $t) -> $t { |
609 | atomic! { |
610 | $t, _a, |
611 | { |
612 | let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; |
613 | a.fetch_and(val, Ordering::AcqRel) |
614 | }, |
615 | { |
616 | let _guard = lock(self.as_ptr() as usize).write(); |
617 | let value = unsafe { &mut *(self.as_ptr()) }; |
618 | let old = *value; |
619 | *value &= val; |
620 | old |
621 | } |
622 | } |
623 | } |
624 | |
625 | /// Applies bitwise "nand" to the current value and returns the previous value. |
626 | /// |
627 | /// # Examples |
628 | /// |
629 | /// ``` |
630 | /// use crossbeam_utils::atomic::AtomicCell; |
631 | /// |
632 | #[doc = $example] |
633 | /// |
634 | /// assert_eq!(a.fetch_nand(3), 7); |
635 | /// assert_eq!(a.load(), !(7 & 3)); |
636 | /// ``` |
637 | #[inline] |
638 | pub fn fetch_nand(&self, val: $t) -> $t { |
639 | atomic! { |
640 | $t, _a, |
641 | { |
642 | let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; |
643 | a.fetch_nand(val, Ordering::AcqRel) |
644 | }, |
645 | { |
646 | let _guard = lock(self.as_ptr() as usize).write(); |
647 | let value = unsafe { &mut *(self.as_ptr()) }; |
648 | let old = *value; |
649 | *value = !(old & val); |
650 | old |
651 | } |
652 | } |
653 | } |
654 | |
655 | /// Applies bitwise "or" to the current value and returns the previous value. |
656 | /// |
657 | /// # Examples |
658 | /// |
659 | /// ``` |
660 | /// use crossbeam_utils::atomic::AtomicCell; |
661 | /// |
662 | #[doc = $example] |
663 | /// |
664 | /// assert_eq!(a.fetch_or(16), 7); |
665 | /// assert_eq!(a.load(), 23); |
666 | /// ``` |
667 | #[inline] |
668 | pub fn fetch_or(&self, val: $t) -> $t { |
669 | atomic! { |
670 | $t, _a, |
671 | { |
672 | let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; |
673 | a.fetch_or(val, Ordering::AcqRel) |
674 | }, |
675 | { |
676 | let _guard = lock(self.as_ptr() as usize).write(); |
677 | let value = unsafe { &mut *(self.as_ptr()) }; |
678 | let old = *value; |
679 | *value |= val; |
680 | old |
681 | } |
682 | } |
683 | } |
684 | |
685 | /// Applies bitwise "xor" to the current value and returns the previous value. |
686 | /// |
687 | /// # Examples |
688 | /// |
689 | /// ``` |
690 | /// use crossbeam_utils::atomic::AtomicCell; |
691 | /// |
692 | #[doc = $example] |
693 | /// |
694 | /// assert_eq!(a.fetch_xor(2), 7); |
695 | /// assert_eq!(a.load(), 5); |
696 | /// ``` |
697 | #[inline] |
698 | pub fn fetch_xor(&self, val: $t) -> $t { |
699 | atomic! { |
700 | $t, _a, |
701 | { |
702 | let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; |
703 | a.fetch_xor(val, Ordering::AcqRel) |
704 | }, |
705 | { |
706 | let _guard = lock(self.as_ptr() as usize).write(); |
707 | let value = unsafe { &mut *(self.as_ptr()) }; |
708 | let old = *value; |
709 | *value ^= val; |
710 | old |
711 | } |
712 | } |
713 | } |
714 | |
715 | /// Compares and sets the maximum of the current value and `val`, |
716 | /// and returns the previous value. |
717 | /// |
718 | /// # Examples |
719 | /// |
720 | /// ``` |
721 | /// use crossbeam_utils::atomic::AtomicCell; |
722 | /// |
723 | #[doc = $example] |
724 | /// |
725 | /// assert_eq!(a.fetch_max(9), 7); |
726 | /// assert_eq!(a.load(), 9); |
727 | /// ``` |
728 | #[inline] |
729 | pub fn fetch_max(&self, val: $t) -> $t { |
730 | atomic! { |
731 | $t, _a, |
732 | { |
733 | let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; |
734 | a.fetch_max(val, Ordering::AcqRel) |
735 | }, |
736 | { |
737 | let _guard = lock(self.as_ptr() as usize).write(); |
738 | let value = unsafe { &mut *(self.as_ptr()) }; |
739 | let old = *value; |
740 | *value = cmp::max(old, val); |
741 | old |
742 | } |
743 | } |
744 | } |
745 | |
746 | /// Compares and sets the minimum of the current value and `val`, |
747 | /// and returns the previous value. |
748 | /// |
749 | /// # Examples |
750 | /// |
751 | /// ``` |
752 | /// use crossbeam_utils::atomic::AtomicCell; |
753 | /// |
754 | #[doc = $example] |
755 | /// |
756 | /// assert_eq!(a.fetch_min(2), 7); |
757 | /// assert_eq!(a.load(), 2); |
758 | /// ``` |
759 | #[inline] |
760 | pub fn fetch_min(&self, val: $t) -> $t { |
761 | atomic! { |
762 | $t, _a, |
763 | { |
764 | let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; |
765 | a.fetch_min(val, Ordering::AcqRel) |
766 | }, |
767 | { |
768 | let _guard = lock(self.as_ptr() as usize).write(); |
769 | let value = unsafe { &mut *(self.as_ptr()) }; |
770 | let old = *value; |
771 | *value = cmp::min(old, val); |
772 | old |
773 | } |
774 | } |
775 | } |
776 | } |
777 | }; |
778 | } |
779 | |
impl_arithmetic!(u8, AtomicU8, "let a = AtomicCell::new(7u8);");
impl_arithmetic!(i8, AtomicI8, "let a = AtomicCell::new(7i8);");
impl_arithmetic!(u16, AtomicU16, "let a = AtomicCell::new(7u16);");
impl_arithmetic!(i16, AtomicI16, "let a = AtomicCell::new(7i16);");

impl_arithmetic!(u32, AtomicU32, "let a = AtomicCell::new(7u32);");
impl_arithmetic!(i32, AtomicI32, "let a = AtomicCell::new(7i32);");
787 | |
#[cfg(target_has_atomic = "64")]
impl_arithmetic!(u64, AtomicU64, "let a = AtomicCell::new(7u64);");
#[cfg(target_has_atomic = "64")]
impl_arithmetic!(i64, AtomicI64, "let a = AtomicCell::new(7i64);");
#[cfg(not(target_has_atomic = "64"))]
impl_arithmetic!(u64, fallback, "let a = AtomicCell::new(7u64);");
#[cfg(not(target_has_atomic = "64"))]
impl_arithmetic!(i64, fallback, "let a = AtomicCell::new(7i64);");
796 | |
797 | // TODO: AtomicU128 is unstable |
798 | // impl_arithmetic!(u128, AtomicU128, "let a = AtomicCell::new(7u128);"); |
799 | // impl_arithmetic!(i128, AtomicI128, "let a = AtomicCell::new(7i128);"); |
impl_arithmetic!(u128, fallback, "let a = AtomicCell::new(7u128);");
impl_arithmetic!(i128, fallback, "let a = AtomicCell::new(7i128);");
802 | |
impl_arithmetic!(usize, AtomicUsize, "let a = AtomicCell::new(7usize);");
impl_arithmetic!(isize, AtomicIsize, "let a = AtomicCell::new(7isize);");
805 | |
806 | impl AtomicCell<bool> { |
807 | /// Applies logical "and" to the current value and returns the previous value. |
808 | /// |
809 | /// # Examples |
810 | /// |
811 | /// ``` |
812 | /// use crossbeam_utils::atomic::AtomicCell; |
813 | /// |
814 | /// let a = AtomicCell::new(true); |
815 | /// |
816 | /// assert_eq!(a.fetch_and(true), true); |
817 | /// assert_eq!(a.load(), true); |
818 | /// |
819 | /// assert_eq!(a.fetch_and(false), true); |
820 | /// assert_eq!(a.load(), false); |
821 | /// ``` |
#[inline]
823 | pub fn fetch_and(&self, val: bool) -> bool { |
824 | atomic! { |
825 | bool, _a, |
826 | { |
827 | let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) }; |
828 | a.fetch_and(val, Ordering::AcqRel) |
829 | }, |
830 | { |
831 | let _guard = lock(self.as_ptr() as usize).write(); |
832 | let value = unsafe { &mut *(self.as_ptr()) }; |
833 | let old = *value; |
834 | *value &= val; |
835 | old |
836 | } |
837 | } |
838 | } |
839 | |
840 | /// Applies logical "nand" to the current value and returns the previous value. |
841 | /// |
842 | /// # Examples |
843 | /// |
844 | /// ``` |
845 | /// use crossbeam_utils::atomic::AtomicCell; |
846 | /// |
847 | /// let a = AtomicCell::new(true); |
848 | /// |
849 | /// assert_eq!(a.fetch_nand(false), true); |
850 | /// assert_eq!(a.load(), true); |
851 | /// |
852 | /// assert_eq!(a.fetch_nand(true), true); |
853 | /// assert_eq!(a.load(), false); |
854 | /// |
855 | /// assert_eq!(a.fetch_nand(false), false); |
856 | /// assert_eq!(a.load(), true); |
857 | /// ``` |
#[inline]
859 | pub fn fetch_nand(&self, val: bool) -> bool { |
860 | atomic! { |
861 | bool, _a, |
862 | { |
863 | let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) }; |
864 | a.fetch_nand(val, Ordering::AcqRel) |
865 | }, |
866 | { |
867 | let _guard = lock(self.as_ptr() as usize).write(); |
868 | let value = unsafe { &mut *(self.as_ptr()) }; |
869 | let old = *value; |
870 | *value = !(old & val); |
871 | old |
872 | } |
873 | } |
874 | } |
875 | |
876 | /// Applies logical "or" to the current value and returns the previous value. |
877 | /// |
878 | /// # Examples |
879 | /// |
880 | /// ``` |
881 | /// use crossbeam_utils::atomic::AtomicCell; |
882 | /// |
883 | /// let a = AtomicCell::new(false); |
884 | /// |
885 | /// assert_eq!(a.fetch_or(false), false); |
886 | /// assert_eq!(a.load(), false); |
887 | /// |
888 | /// assert_eq!(a.fetch_or(true), false); |
889 | /// assert_eq!(a.load(), true); |
890 | /// ``` |
#[inline]
892 | pub fn fetch_or(&self, val: bool) -> bool { |
893 | atomic! { |
894 | bool, _a, |
895 | { |
896 | let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) }; |
897 | a.fetch_or(val, Ordering::AcqRel) |
898 | }, |
899 | { |
900 | let _guard = lock(self.as_ptr() as usize).write(); |
901 | let value = unsafe { &mut *(self.as_ptr()) }; |
902 | let old = *value; |
903 | *value |= val; |
904 | old |
905 | } |
906 | } |
907 | } |
908 | |
909 | /// Applies logical "xor" to the current value and returns the previous value. |
910 | /// |
911 | /// # Examples |
912 | /// |
913 | /// ``` |
914 | /// use crossbeam_utils::atomic::AtomicCell; |
915 | /// |
916 | /// let a = AtomicCell::new(true); |
917 | /// |
918 | /// assert_eq!(a.fetch_xor(false), true); |
919 | /// assert_eq!(a.load(), true); |
920 | /// |
921 | /// assert_eq!(a.fetch_xor(true), true); |
922 | /// assert_eq!(a.load(), false); |
923 | /// ``` |
#[inline]
925 | pub fn fetch_xor(&self, val: bool) -> bool { |
926 | atomic! { |
927 | bool, _a, |
928 | { |
929 | let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) }; |
930 | a.fetch_xor(val, Ordering::AcqRel) |
931 | }, |
932 | { |
933 | let _guard = lock(self.as_ptr() as usize).write(); |
934 | let value = unsafe { &mut *(self.as_ptr()) }; |
935 | let old = *value; |
936 | *value ^= val; |
937 | old |
938 | } |
939 | } |
940 | } |
941 | } |
942 | |
943 | impl<T: Default> Default for AtomicCell<T> { |
944 | fn default() -> AtomicCell<T> { |
945 | AtomicCell::new(T::default()) |
946 | } |
947 | } |
948 | |
949 | impl<T> From<T> for AtomicCell<T> { |
#[inline]
951 | fn from(val: T) -> AtomicCell<T> { |
952 | AtomicCell::new(val) |
953 | } |
954 | } |
955 | |
956 | impl<T: Copy + fmt::Debug> fmt::Debug for AtomicCell<T> { |
957 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
f.debug_struct("AtomicCell")
    .field("value", &self.load())
960 | .finish() |
961 | } |
962 | } |
963 | |
964 | /// Returns `true` if values of type `A` can be transmuted into values of type `B`. |
965 | const fn can_transmute<A, B>() -> bool { |
// Sizes must be equal, and the alignment of `A` must be greater than or equal to that of `B`.
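//
// For example, on typical targets `can_transmute::<usize, atomic::AtomicUsize>()` is true, while
// `can_transmute::<[u8; 8], atomic::AtomicU64>()` is false because `[u8; 8]` only has alignment 1.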
967 | (mem::size_of::<A>() == mem::size_of::<B>()) & (mem::align_of::<A>() >= mem::align_of::<B>()) |
968 | } |
969 | |
970 | /// Returns a reference to the global lock associated with the `AtomicCell` at address `addr`. |
971 | /// |
972 | /// This function is used to protect atomic data which doesn't fit into any of the primitive atomic |
973 | /// types in `std::sync::atomic`. Operations on such atomics must therefore use a global lock. |
974 | /// |
975 | /// However, there is not only one global lock but an array of many locks, and one of them is |
976 | /// picked based on the given address. Having many locks reduces contention and improves |
977 | /// scalability. |
#[inline]
#[must_use]
980 | fn lock(addr: usize) -> &'static SeqLock { |
981 | // The number of locks is a prime number because we want to make sure `addr % LEN` gets |
982 | // dispersed across all locks. |
983 | // |
984 | // Note that addresses are always aligned to some power of 2, depending on type `T` in |
985 | // `AtomicCell<T>`. If `LEN` was an even number, then `addr % LEN` would be an even number, |
986 | // too, which means only half of the locks would get utilized! |
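// For example, with a hypothetical even `LEN` of 64 and addresses aligned to 8 bytes,
// `addr % LEN` could only ever land on 8 of the 64 slots.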
987 | // |
988 | // It is also possible for addresses to accidentally get aligned to a number that is not a |
989 | // power of 2. Consider this example: |
990 | // |
991 | // ``` |
992 | // #[repr(C)] |
993 | // struct Foo { |
994 | // a: AtomicCell<u8>, |
995 | // b: u8, |
996 | // c: u8, |
997 | // } |
998 | // ``` |
999 | // |
1000 | // Now, if we have a slice of type `&[Foo]`, it is possible that field `a` in all items gets |
1001 | // stored at addresses that are multiples of 3. It'd be too bad if `LEN` was divisible by 3. |
1002 | // In order to protect from such cases, we simply choose a large prime number for `LEN`. |
1003 | const LEN: usize = 67; |
1004 | const L: CachePadded<SeqLock> = CachePadded::new(SeqLock::new()); |
1005 | static LOCKS: [CachePadded<SeqLock>; LEN] = [L; LEN]; |
1006 | |
1007 | // If the modulus is a constant number, the compiler will use crazy math to transform this into |
1008 | // a sequence of cheap arithmetic operations rather than using the slow modulo instruction. |
1009 | &LOCKS[addr % LEN] |
1010 | } |
1011 | |
1012 | /// An atomic `()`. |
1013 | /// |
1014 | /// All operations are noops. |
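///
/// Zero-sized types can be transmuted into this type, so checking against `AtomicUnit` first in
/// the `atomic!` macro is what makes operations on `AtomicCell<()>` and other zero-sized types
/// lock-free.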
1015 | struct AtomicUnit; |
1016 | |
1017 | impl AtomicUnit { |
#[inline]
1019 | fn load(&self, _order: Ordering) {} |
1020 | |
#[inline]
1022 | fn store(&self, _val: (), _order: Ordering) {} |
1023 | |
#[inline]
1025 | fn swap(&self, _val: (), _order: Ordering) {} |
1026 | |
#[inline]
1028 | fn compare_exchange_weak( |
1029 | &self, |
1030 | _current: (), |
1031 | _new: (), |
1032 | _success: Ordering, |
1033 | _failure: Ordering, |
1034 | ) -> Result<(), ()> { |
1035 | Ok(()) |
1036 | } |
1037 | } |
1038 | |
1039 | /// Returns `true` if operations on `AtomicCell<T>` are lock-free. |
1040 | const fn atomic_is_lock_free<T>() -> bool { |
1041 | atomic! { T, _a, true, false } |
1042 | } |
1043 | |
1044 | /// Atomically reads data from `src`. |
1045 | /// |
/// This operation uses the `Acquire` ordering. If possible, an atomic instruction is used, and a
1047 | /// global lock otherwise. |
1048 | unsafe fn atomic_load<T>(src: *mut T) -> T |
1049 | where |
1050 | T: Copy, |
1051 | { |
1052 | atomic! { |
1053 | T, a, |
1054 | { |
1055 | a = &*(src as *const _ as *const _); |
1056 | mem::transmute_copy(&a.load(Ordering::Acquire)) |
1057 | }, |
1058 | { |
1059 | let lock = lock(src as usize); |
1060 | |
1061 | // Try doing an optimistic read first. |
1062 | if let Some(stamp) = lock.optimistic_read() { |
1063 | // We need a volatile read here because other threads might concurrently modify the |
1064 | // value. In theory, data races are *always* UB, even if we use volatile reads and |
1065 | // discard the data when a data race is detected. The proper solution would be to |
1066 | // do atomic reads and atomic writes, but we can't atomically read and write all |
1067 | // kinds of data since `AtomicU8` is not available on stable Rust yet. |
1068 | // Load as `MaybeUninit` because we may load a value that is not valid as `T`. |
1069 | let val = ptr::read_volatile(src.cast::<MaybeUninit<T>>()); |
1070 | |
1071 | if lock.validate_read(stamp) { |
1072 | return val.assume_init(); |
1073 | } |
1074 | } |
1075 | |
1076 | // Grab a regular write lock so that writers don't starve this load. |
1077 | let guard = lock.write(); |
1078 | let val = ptr::read(src); |
1079 | // The value hasn't been changed. Drop the guard without incrementing the stamp. |
1080 | guard.abort(); |
1081 | val |
1082 | } |
1083 | } |
1084 | } |
1085 | |
1086 | /// Atomically writes `val` to `dst`. |
1087 | /// |
/// This operation uses the `Release` ordering. If possible, an atomic instruction is used, and a
1089 | /// global lock otherwise. |
1090 | unsafe fn atomic_store<T>(dst: *mut T, val: T) { |
1091 | atomic! { |
1092 | T, a, |
1093 | { |
1094 | a = &*(dst as *const _ as *const _); |
1095 | a.store(mem::transmute_copy(&val), Ordering::Release); |
1096 | mem::forget(val); |
1097 | }, |
1098 | { |
1099 | let _guard = lock(dst as usize).write(); |
1100 | ptr::write(dst, val); |
1101 | } |
1102 | } |
1103 | } |
1104 | |
1105 | /// Atomically swaps data at `dst` with `val`. |
1106 | /// |
/// This operation uses the `AcqRel` ordering. If possible, an atomic instruction is used, and a
1108 | /// global lock otherwise. |
1109 | unsafe fn atomic_swap<T>(dst: *mut T, val: T) -> T { |
1110 | atomic! { |
1111 | T, a, |
1112 | { |
1113 | a = &*(dst as *const _ as *const _); |
1114 | let res = mem::transmute_copy(&a.swap(mem::transmute_copy(&val), Ordering::AcqRel)); |
1115 | mem::forget(val); |
1116 | res |
1117 | }, |
1118 | { |
1119 | let _guard = lock(dst as usize).write(); |
1120 | ptr::replace(dst, val) |
1121 | } |
1122 | } |
1123 | } |
1124 | |
1125 | /// Atomically compares data at `dst` to `current` and, if equal byte-for-byte, exchanges data at |
1126 | /// `dst` with `new`. |
1127 | /// |
1128 | /// Returns the old value on success, or the current value at `dst` on failure. |
1129 | /// |
/// This operation uses the `AcqRel` ordering. If possible, an atomic instruction is used, and a
1131 | /// global lock otherwise. |
#[allow(clippy::let_unit_value)]
1133 | unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T, mut current: T, new: T) -> Result<T, T> |
1134 | where |
1135 | T: Copy + Eq, |
1136 | { |
1137 | atomic! { |
1138 | T, a, |
1139 | { |
1140 | a = &*(dst as *const _ as *const _); |
1141 | let mut current_raw = mem::transmute_copy(¤t); |
1142 | let new_raw = mem::transmute_copy(&new); |
1143 | |
1144 | loop { |
1145 | match a.compare_exchange_weak( |
1146 | current_raw, |
1147 | new_raw, |
1148 | Ordering::AcqRel, |
1149 | Ordering::Acquire, |
1150 | ) { |
1151 | Ok(_) => break Ok(current), |
1152 | Err(previous_raw) => { |
1153 | let previous = mem::transmute_copy(&previous_raw); |
1154 | |
1155 | if !T::eq(&previous, ¤t) { |
1156 | break Err(previous); |
1157 | } |
1158 | |
1159 | // The compare-exchange operation has failed and didn't store `new`. The |
1160 | // failure is either spurious, or `previous` was semantically equal to |
1161 | // `current` but not byte-equal. Let's retry with `previous` as the new |
1162 | // `current`. |
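// (For instance, a struct with padding bytes may compare equal via `Eq` while the
// underlying bytes, padding included, differ.)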
1163 | current = previous; |
1164 | current_raw = previous_raw; |
1165 | } |
1166 | } |
1167 | } |
1168 | }, |
1169 | { |
1170 | let guard = lock(dst as usize).write(); |
1171 | |
1172 | if T::eq(&*dst, ¤t) { |
1173 | Ok(ptr::replace(dst, new)) |
1174 | } else { |
1175 | let val = ptr::read(dst); |
1176 | // The value hasn't been changed. Drop the guard without incrementing the stamp. |
1177 | guard.abort(); |
1178 | Err(val) |
1179 | } |
1180 | } |
1181 | } |
1182 | } |
1183 | |