// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use std::sync::atomic::AtomicUsize;

// Extension trait to add lock elision primitives to atomic types
pub trait AtomicElisionExt {
    type IntType;

    // Perform a compare_exchange and start a transaction
    fn elision_compare_exchange_acquire(
        &self,
        current: Self::IntType,
        new: Self::IntType,
    ) -> Result<Self::IntType, Self::IntType>;

    // Perform a fetch_sub and end a transaction
    fn elision_fetch_sub_release(&self, val: Self::IntType) -> Self::IntType;
}

// Indicates whether the target architecture supports lock elision
#[inline]
pub fn have_elision() -> bool {
    cfg!(all(
        feature = "hardware-lock-elision",
        any(target_arch = "x86", target_arch = "x86_64"),
    ))
}

// This implementation is never actually called because it is guarded by
// have_elision().
#[cfg(not(all(
    feature = "hardware-lock-elision",
    any(target_arch = "x86", target_arch = "x86_64")
)))]
impl AtomicElisionExt for AtomicUsize {
    type IntType = usize;

    #[inline]
    fn elision_compare_exchange_acquire(&self, _: usize, _: usize) -> Result<usize, usize> {
        unreachable!();
    }

    #[inline]
    fn elision_fetch_sub_release(&self, _: usize) -> usize {
        unreachable!();
    }
}

#[cfg(all(
    feature = "hardware-lock-elision",
    any(target_arch = "x86", target_arch = "x86_64")
))]
impl AtomicElisionExt for AtomicUsize {
    type IntType = usize;

    #[inline]
    fn elision_compare_exchange_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
        unsafe {
            use core::arch::asm;
            let prev: usize;
            // The xacquire prefix marks the start of a hardware lock elision
            // transaction; cmpxchg compares eax/rax with the destination and,
            // on a match, stores the new value. Either way eax/rax ends up
            // holding the value that was in memory. CPUs without HLE simply
            // ignore the prefix and perform a normal locked cmpxchg.
            #[cfg(target_pointer_width = "32")]
            asm!(
                "xacquire",
                "lock",
                "cmpxchg [{:e}], {:e}",
                in(reg) self,
                in(reg) new,
                inout("eax") current => prev,
            );
            #[cfg(target_pointer_width = "64")]
            asm!(
                "xacquire",
                "lock",
                "cmpxchg [{}], {}",
                in(reg) self,
                in(reg) new,
                inout("rax") current => prev,
            );
            if prev == current {
                Ok(prev)
            } else {
                Err(prev)
            }
        }
    }

    #[inline]
    fn elision_fetch_sub_release(&self, val: usize) -> usize {
        unsafe {
            use core::arch::asm;
            let prev: usize;
            // The xrelease prefix marks the end of the elided region; xadd
            // with the negated value performs an atomic subtraction and
            // leaves the previous value in the source register.
            #[cfg(target_pointer_width = "32")]
            asm!(
                "xrelease",
                "lock",
                "xadd [{:e}], {:e}",
                in(reg) self,
                inout(reg) val.wrapping_neg() => prev,
            );
            #[cfg(target_pointer_width = "64")]
            asm!(
                "xrelease",
                "lock",
                "xadd [{}], {}",
                in(reg) self,
                inout(reg) val.wrapping_neg() => prev,
            );
            prev
        }
    }
}
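
// Illustrative sketch, not part of the original file: one way a caller could
// combine `have_elision()` with the `AtomicElisionExt` methods for a simple
// shared-reader count. The names `ONE_READER`, `try_lock_shared`, and
// `unlock_shared` are hypothetical and exist only for this example.
#[allow(dead_code)]
mod elision_example {
    use super::{have_elision, AtomicElisionExt};
    use std::sync::atomic::{AtomicUsize, Ordering};

    const ONE_READER: usize = 1;

    // Try to take a shared lock on an unlocked state, starting a hardware
    // transaction when elision is available and falling back to a plain
    // compare_exchange otherwise.
    fn try_lock_shared(state: &AtomicUsize) -> bool {
        if have_elision() {
            state.elision_compare_exchange_acquire(0, ONE_READER).is_ok()
        } else {
            state
                .compare_exchange(0, ONE_READER, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
        }
    }

    // Release the shared lock, ending the transaction when one was started.
    fn unlock_shared(state: &AtomicUsize) {
        if have_elision() {
            state.elision_fetch_sub_release(ONE_READER);
        } else {
            state.fetch_sub(ONE_READER, Ordering::Release);
        }
    }
}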