use std::sync::atomic;
| 2 | |
| 3 | /// Prevents other operations from affecting timing measurements. |
| 4 | #[inline (always)] |
| 5 | pub fn full_fence() { |
| 6 | asm_fence(); |
| 7 | atomic::fence(order:atomic::Ordering::SeqCst); |
| 8 | } |
| 9 | |
| 10 | /// Prevents the compiler from reordering operations. |
| 11 | #[inline (always)] |
| 12 | pub fn compiler_fence() { |
| 13 | asm_fence(); |
| 14 | atomic::compiler_fence(order:atomic::Ordering::SeqCst); |
| 15 | } |
| 16 | |
/// Stronger compiler fence on [platforms with stable `asm!`](https://doc.rust-lang.org/nightly/reference/inline-assembly.html).
///
/// This prevents LLVM from removing loops or hoisting logic out of the
/// benchmark loop.
///
/// On architectures without stable `asm!` support, and under Miri, this
/// is a no-op.
#[inline(always)]
fn asm_fence() {
    // Miri does not support inline assembly.
    if cfg!(miri) {
        return;
    }

    #[cfg(any(
        target_arch = "x86",
        target_arch = "x86_64",
        target_arch = "arm",
        target_arch = "aarch64",
        target_arch = "riscv32",
        target_arch = "riscv64",
        target_arch = "loongarch64",
    ))]
    // SAFETY: The inline assembly is a no-op.
    unsafe {
        // Preserve flags because we don't want to pessimize user logic.
        std::arch::asm!("", options(nostack, preserves_flags));
    }
}
| 43 | |