use std::mem;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;

use crossbeam_utils::atomic::AtomicCell;

#[test]
fn is_lock_free() {
    struct UsizeWrap(usize);
    struct U8Wrap(bool);
    struct I16Wrap(i16);
    #[repr(align(8))]
    struct U64Align8(u64);

    assert!(AtomicCell::<usize>::is_lock_free());
    assert!(AtomicCell::<isize>::is_lock_free());
    assert!(AtomicCell::<UsizeWrap>::is_lock_free());

    assert!(AtomicCell::<()>::is_lock_free());

    assert!(AtomicCell::<u8>::is_lock_free());
    assert!(AtomicCell::<i8>::is_lock_free());
    assert!(AtomicCell::<bool>::is_lock_free());
    assert!(AtomicCell::<U8Wrap>::is_lock_free());

    assert!(AtomicCell::<u16>::is_lock_free());
    assert!(AtomicCell::<i16>::is_lock_free());
    assert!(AtomicCell::<I16Wrap>::is_lock_free());

    assert!(AtomicCell::<u32>::is_lock_free());
    assert!(AtomicCell::<i32>::is_lock_free());
    // The sizes of the two types must be equal, and the alignment of `u64` must be greater
    // than or equal to that of `AtomicU64`. On i686-unknown-linux-gnu, the alignment of `u64`
    // is `4` while that of `AtomicU64` is `8`, so `AtomicCell<u64>` is not lock-free there.
    assert_eq!(
        AtomicCell::<u64>::is_lock_free(),
        cfg!(target_has_atomic = "64") && mem::align_of::<u64>() == 8
    );
    assert_eq!(mem::size_of::<U64Align8>(), 8);
    assert_eq!(mem::align_of::<U64Align8>(), 8);
    assert_eq!(
        AtomicCell::<U64Align8>::is_lock_free(),
        cfg!(target_has_atomic = "64")
    );
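    // Sanity check of the rule described above: on a target with native 64-bit atomics,
    // `U64Align8` should match `AtomicU64` in size and satisfy the alignment requirement.
    #[cfg(target_has_atomic = "64")]
    {
        use std::sync::atomic::AtomicU64;
        assert_eq!(mem::size_of::<U64Align8>(), mem::size_of::<AtomicU64>());
        assert!(mem::align_of::<U64Align8>() >= mem::align_of::<AtomicU64>());
    }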

    // AtomicU128 is unstable
    assert!(!AtomicCell::<u128>::is_lock_free());
}

#[test]
fn const_is_lock_free() {
    const _U: bool = AtomicCell::<usize>::is_lock_free();
    const _I: bool = AtomicCell::<isize>::is_lock_free();
}

#[test]
fn drops_unit() {
    static CNT: AtomicUsize = AtomicUsize::new(0);
    CNT.store(0, SeqCst);

    #[derive(Debug, PartialEq, Eq)]
    struct Foo();

    impl Foo {
        fn new() -> Foo {
            CNT.fetch_add(1, SeqCst);
            Foo()
        }
    }

    impl Drop for Foo {
        fn drop(&mut self) {
            CNT.fetch_sub(1, SeqCst);
        }
    }

    impl Default for Foo {
        fn default() -> Foo {
            Foo::new()
        }
    }

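    // `CNT` counts live `Foo` values: `new`/`default` increment it and `Drop` decrements it.
    // The assertions below verify that `AtomicCell` drops each replaced value exactly once,
    // keeping exactly one `Foo` alive inside the cell until the cell itself is dropped.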
    let a = AtomicCell::new(Foo::new());

    assert_eq!(a.swap(Foo::new()), Foo::new());
    assert_eq!(CNT.load(SeqCst), 1);

    a.store(Foo::new());
    assert_eq!(CNT.load(SeqCst), 1);

    assert_eq!(a.swap(Foo::default()), Foo::new());
    assert_eq!(CNT.load(SeqCst), 1);

    drop(a);
    assert_eq!(CNT.load(SeqCst), 0);
}

#[test]
fn drops_u8() {
    static CNT: AtomicUsize = AtomicUsize::new(0);
    CNT.store(0, SeqCst);

    #[derive(Debug, PartialEq, Eq)]
    struct Foo(u8);

    impl Foo {
        fn new(val: u8) -> Foo {
            CNT.fetch_add(1, SeqCst);
            Foo(val)
        }
    }

    impl Drop for Foo {
        fn drop(&mut self) {
            CNT.fetch_sub(1, SeqCst);
        }
    }

    impl Default for Foo {
        fn default() -> Foo {
            Foo::new(0)
        }
    }

    let a = AtomicCell::new(Foo::new(5));

    assert_eq!(a.swap(Foo::new(6)), Foo::new(5));
    assert_eq!(a.swap(Foo::new(1)), Foo::new(6));
    assert_eq!(CNT.load(SeqCst), 1);

    a.store(Foo::new(2));
    assert_eq!(CNT.load(SeqCst), 1);

    assert_eq!(a.swap(Foo::default()), Foo::new(2));
    assert_eq!(CNT.load(SeqCst), 1);

    assert_eq!(a.swap(Foo::default()), Foo::new(0));
    assert_eq!(CNT.load(SeqCst), 1);

    drop(a);
    assert_eq!(CNT.load(SeqCst), 0);
}

#[test]
fn drops_usize() {
    static CNT: AtomicUsize = AtomicUsize::new(0);
    CNT.store(0, SeqCst);

    #[derive(Debug, PartialEq, Eq)]
    struct Foo(usize);

    impl Foo {
        fn new(val: usize) -> Foo {
            CNT.fetch_add(1, SeqCst);
            Foo(val)
        }
    }

    impl Drop for Foo {
        fn drop(&mut self) {
            CNT.fetch_sub(1, SeqCst);
        }
    }

    impl Default for Foo {
        fn default() -> Foo {
            Foo::new(0)
        }
    }

    let a = AtomicCell::new(Foo::new(5));

    assert_eq!(a.swap(Foo::new(6)), Foo::new(5));
    assert_eq!(a.swap(Foo::new(1)), Foo::new(6));
    assert_eq!(CNT.load(SeqCst), 1);

    a.store(Foo::new(2));
    assert_eq!(CNT.load(SeqCst), 1);

    assert_eq!(a.swap(Foo::default()), Foo::new(2));
    assert_eq!(CNT.load(SeqCst), 1);

    assert_eq!(a.swap(Foo::default()), Foo::new(0));
    assert_eq!(CNT.load(SeqCst), 1);

    drop(a);
    assert_eq!(CNT.load(SeqCst), 0);
}

#[test]
fn modular_u8() {
    #[derive(Clone, Copy, Eq, Debug, Default)]
    struct Foo(u8);

    impl PartialEq for Foo {
        fn eq(&self, other: &Foo) -> bool {
            self.0 % 5 == other.0 % 5
        }
    }

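    // Note that equality is modulo 5, so e.g. `Foo(2) == Foo(52)` and `Foo(0) == Foo(100)`.
    // The assertions below rely on that: `assert_eq!` compares through this `==`, and the
    // `compare_exchange` calls succeed even when the stored bytes differ from `current`,
    // because the comparison is logical rather than bitwise.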
    let a = AtomicCell::new(Foo(1));

    assert_eq!(a.load(), Foo(1));
    assert_eq!(a.swap(Foo(2)), Foo(11));
    assert_eq!(a.load(), Foo(52));

    a.store(Foo(0));
    assert_eq!(a.compare_exchange(Foo(0), Foo(5)), Ok(Foo(100)));
    assert_eq!(a.load().0, 5);
    assert_eq!(a.compare_exchange(Foo(10), Foo(15)), Ok(Foo(100)));
    assert_eq!(a.load().0, 15);
}

#[test]
fn modular_usize() {
    #[derive(Clone, Copy, Eq, Debug, Default)]
    struct Foo(usize);

    impl PartialEq for Foo {
        fn eq(&self, other: &Foo) -> bool {
            self.0 % 5 == other.0 % 5
        }
    }

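    // Same caveat as in `modular_u8` above: only the value modulo 5 matters to `==`, both for
    // the `assert_eq!` checks and for the `compare_exchange` comparisons.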
    let a = AtomicCell::new(Foo(1));

    assert_eq!(a.load(), Foo(1));
    assert_eq!(a.swap(Foo(2)), Foo(11));
    assert_eq!(a.load(), Foo(52));

    a.store(Foo(0));
    assert_eq!(a.compare_exchange(Foo(0), Foo(5)), Ok(Foo(100)));
    assert_eq!(a.load().0, 5);
    assert_eq!(a.compare_exchange(Foo(10), Foo(15)), Ok(Foo(100)));
    assert_eq!(a.load().0, 15);
}

#[test]
fn garbage_padding() {
    #[derive(Copy, Clone, Eq, PartialEq)]
    struct Object {
        a: i64,
        b: i32,
    }

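    // `Object` is larger than its fields (on typical 64-bit targets it is 16 bytes, with 4
    // bytes of trailing padding), and the `_garbage` array below leaves nonzero bytes on the
    // stack so that this padding is unlikely to be zeroed. The test checks that
    // `compare_exchange` still succeeds when the stored and expected values differ only in
    // their padding bytes.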
    let cell = AtomicCell::new(Object { a: 0, b: 0 });
    let _garbage = [0xfe, 0xfe, 0xfe, 0xfe, 0xfe]; // Needed
    let next = Object { a: 0, b: 0 };

    let prev = cell.load();
    assert!(cell.compare_exchange(prev, next).is_ok());
    println!();
}

#[test]
fn const_atomic_cell_new() {
    static CELL: AtomicCell<usize> = AtomicCell::new(0);

    CELL.store(1);
    assert_eq!(CELL.load(), 1);
}

// https://github.com/crossbeam-rs/crossbeam/pull/767
macro_rules! test_arithmetic {
    ($test_name:ident, $ty:ident) => {
        #[test]
        fn $test_name() {
            let a: AtomicCell<$ty> = AtomicCell::new(7);

            assert_eq!(a.fetch_add(3), 7);
            assert_eq!(a.load(), 10);

            assert_eq!(a.fetch_sub(3), 10);
            assert_eq!(a.load(), 7);

            assert_eq!(a.fetch_and(3), 7);
            assert_eq!(a.load(), 3);

            assert_eq!(a.fetch_or(16), 3);
            assert_eq!(a.load(), 19);

            assert_eq!(a.fetch_xor(2), 19);
            assert_eq!(a.load(), 17);

            assert_eq!(a.fetch_max(18), 17);
            assert_eq!(a.load(), 18);

            assert_eq!(a.fetch_min(17), 18);
            assert_eq!(a.load(), 17);

            assert_eq!(a.fetch_nand(7), 17);
            assert_eq!(a.load(), !(17 & 7));
        }
    };
}
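// Each invocation below expands to a `#[test]` exercising the `fetch_*` arithmetic and bitwise
// operations of `AtomicCell` for one integer type.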
test_arithmetic!(arithmetic_u8, u8);
test_arithmetic!(arithmetic_i8, i8);
test_arithmetic!(arithmetic_u16, u16);
test_arithmetic!(arithmetic_i16, i16);
test_arithmetic!(arithmetic_u32, u32);
test_arithmetic!(arithmetic_i32, i32);
test_arithmetic!(arithmetic_u64, u64);
test_arithmetic!(arithmetic_i64, i64);
test_arithmetic!(arithmetic_u128, u128);
test_arithmetic!(arithmetic_i128, i128);

// https://github.com/crossbeam-rs/crossbeam/issues/748
#[cfg_attr(miri, ignore)] // TODO
#[test]
fn issue_748() {
    #[allow(dead_code)]
    #[repr(align(8))]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    enum Test {
        Field(u32),
        FieldLess,
    }

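    // `repr(align(8))` forces `Test` to be 8 bytes, so `AtomicCell<Test>` can be backed by a
    // 64-bit atomic even though a `Test` value does not initialize all 8 bytes; see the linked
    // issue for why this case needs its own test (and why it is ignored under Miri).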
    assert_eq!(mem::size_of::<Test>(), 8);
    assert_eq!(
        AtomicCell::<Test>::is_lock_free(),
        cfg!(target_has_atomic = "64")
    );
    let x = AtomicCell::new(Test::FieldLess);
    assert_eq!(x.load(), Test::FieldLess);
}

// https://github.com/crossbeam-rs/crossbeam/issues/833
#[test]
fn issue_833() {
    use std::num::NonZeroU128;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::thread;

    #[cfg(miri)]
    const N: usize = 10_000;
    #[cfg(not(miri))]
    const N: usize = 1_000_000;

    #[allow(dead_code)]
    enum Enum {
        NeverConstructed,
        Cell(AtomicCell<NonZeroU128>),
    }

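    // `AtomicCell<NonZeroU128>` is not lock-free (see `is_lock_free` above), so stores go
    // through crossbeam's fallback implementation while the main thread below repeatedly reads
    // the discriminant of `STATIC`. The loop acts as a regression test that this concurrent
    // discriminant read never observes a torn value that would look like `NeverConstructed`;
    // see the linked issue for details.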
    static STATIC: Enum = Enum::Cell(AtomicCell::new(match NonZeroU128::new(1) {
        Some(nonzero) => nonzero,
        None => unreachable!(),
    }));
    static FINISHED: AtomicBool = AtomicBool::new(false);

    let handle = thread::spawn(|| {
        let cell = match &STATIC {
            Enum::NeverConstructed => unreachable!(),
            Enum::Cell(cell) => cell,
        };
        let x = NonZeroU128::new(0xFFFF_FFFF_FFFF_FFFF_0000_0000_0000_0000).unwrap();
        let y = NonZeroU128::new(0x0000_0000_0000_0000_FFFF_FFFF_FFFF_FFFF).unwrap();
        while !FINISHED.load(Ordering::Relaxed) {
            cell.store(x);
            cell.store(y);
        }
    });

    for _ in 0..N {
        if let Enum::NeverConstructed = STATIC {
            unreachable!(":(");
        }
    }

    FINISHED.store(true, Ordering::Relaxed);
    handle.join().unwrap();
}