// ----------------------------------------------------------------------
// CycleClock
// A CycleClock tells you the current time in Cycles. The "time"
// is actually time since power-on. This is like time() but doesn't
// involve a system call and is much more precise.
//
// NOTE: Not all cpu/platform/kernel combinations guarantee that this
// clock increments at a constant rate or is synchronized across all logical
// cpus in a system.
//
// If you need the above guarantees, please consider using a different
// API. There are efforts to provide an interface with millisecond
// granularity that is implemented as a memory read. A memory read is
// generally cheaper than the CycleClock on many architectures.
//
// Also, in some out-of-order CPU implementations, the CycleClock is not
// serializing. So if you're trying to count at cycle granularity, your
// data might be inaccurate due to out-of-order instruction execution.
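//
// A minimal usage sketch (DoWork() here is a hypothetical workload, not part
// of this header): subtract two readings to obtain an elapsed cycle count.
//
//   int64_t start = benchmark::cycleclock::Now();
//   DoWork();
//   int64_t elapsed_cycles = benchmark::cycleclock::Now() - start;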
// ----------------------------------------------------------------------

#ifndef BENCHMARK_CYCLECLOCK_H_
#define BENCHMARK_CYCLECLOCK_H_

#include <cstdint>

#include "benchmark/benchmark.h"
#include "internal_macros.h"

#if defined(BENCHMARK_OS_MACOSX)
#include <mach/mach_time.h>
#endif
// For MSVC, we want to use '_asm rdtsc' when possible (since it works
// with even ancient MSVC compilers), and when not possible the
// __rdtsc intrinsic, declared in <intrin.h>. Unfortunately, in some
// environments, <windows.h> and <intrin.h> have conflicting
// declarations of some other intrinsics, breaking compilation.
// Therefore, we simply declare __rdtsc ourselves. See also
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
#if defined(COMPILER_MSVC) && !defined(_M_IX86) && !defined(_M_ARM64) && \
    !defined(_M_ARM64EC)
extern "C" uint64_t __rdtsc();
#pragma intrinsic(__rdtsc)
#endif

#if !defined(BENCHMARK_OS_WINDOWS) || defined(BENCHMARK_OS_MINGW)
#include <sys/time.h>
#include <time.h>
#endif

#ifdef BENCHMARK_OS_EMSCRIPTEN
#include <emscripten.h>
#endif

namespace benchmark {
// NOTE: only i386 and x86_64 have been well tested.
// PPC, sparc, alpha, and ia64 are based on
// http://peter.kuscsik.com/wordpress/?p=14
// with modifications by m3b. See also
// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
namespace cycleclock {
// This should return the number of cycles since power-on. Thread-safe.
inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
#if defined(BENCHMARK_OS_MACOSX)
  // this goes at the top because we need ALL Macs, regardless of
  // architecture, to return the number of "mach time units" that
  // have passed since startup. See sysinfo.cc where
  // InitializeSystemInfo() sets the supposed cpu clock frequency of
  // macs to the number of mach time units per second, not actual
  // CPU clock frequency (which can change in the face of CPU
  // frequency scaling). Also note that when the Mac sleeps, this
  // counter pauses; it does not continue counting, nor does it
  // reset to zero.
  return mach_absolute_time();
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
  // this goes above x86-specific code because old versions of Emscripten
  // define __x86_64__, although they have nothing to do with it.
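  // emscripten_get_now() returns a wall-clock time in milliseconds (as a
  // double), so multiplying by 1e+6 converts it to nanoseconds.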
  return static_cast<int64_t>(emscripten_get_now() * 1e+6);
#elif defined(__i386__)
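  // On 32-bit x86, RDTSC writes the 64-bit timestamp into EDX:EAX; the "=A"
  // constraint tells GCC/Clang to read that register pair as one 64-bit value.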
  int64_t ret;
  __asm__ volatile("rdtsc" : "=A" (ret));
  return ret;
#elif defined(__x86_64__) || defined(__amd64__)
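  // On x86-64, RDTSC still writes the counter into EDX:EAX (the upper halves
  // of RDX/RAX are cleared), so the two 32-bit halves are read separately and
  // recombined below.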
  uint64_t low, high;
  __asm__ volatile("rdtsc" : "=a" (low), "=d" (high));
  return (high << 32) | low;
#elif defined(__powerpc__) || defined(__ppc__)
  // This returns a time-base, which is not always precisely a cycle-count.
#if defined(__powerpc64__) || defined(__ppc64__)
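  // SPR 268 is the Time Base register, which 64-bit PowerPC can read in a
  // single mfspr.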
  int64_t tb;
  asm volatile("mfspr %0, 268" : "=r" (tb));
  return tb;
#else
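  // On 32-bit PowerPC the time base must be read as two halves: upper, then
  // lower, then upper again. If the upper half changed between the two reads,
  // a carry occurred, and the masking below forces the lower half to zero
  // (the closest consistent value) while keeping the second upper read.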
  uint32_t tbl, tbu0, tbu1;
  asm volatile(
      "mftbu %0\n"
      "mftb %1\n"
      "mftbu %2"
      : "=r" (tbu0), "=r" (tbl), "=r" (tbu1));
  tbl &= -static_cast<int32_t>(tbu0 == tbu1);
  // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is no longer needed)
  return (static_cast<uint64_t>(tbu1) << 32) | tbl;
#endif
#elif defined(__sparc__)
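  // The .byte sequence below encodes "rd %tick, %g1" (written as raw bytes,
  // presumably so it assembles even without V9 mnemonics enabled); the second
  // asm then copies %g1 into a C variable.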
  int64_t tick;
  asm(".byte 0x83, 0x41, 0x00, 0x00");
  asm("mov %%g1, %0" : "=r" (tick));
  return tick;
#elif defined(__ia64__)
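  // ar.itc is the Itanium Interval Time Counter application register.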
  int64_t itc;
  asm("mov %0 = ar.itc" : "=r" (itc));
  return itc;
#elif defined(COMPILER_MSVC) && defined(_M_IX86)
  // Older MSVC compilers (like 7.x) don't seem to support the
  // __rdtsc intrinsic properly, so I prefer to use _asm instead
  // when I know it will work. Otherwise, I'll use __rdtsc and hope
  // the code is being compiled with a non-ancient compiler.
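  // RDTSC leaves its result in EDX:EAX, which is exactly where MSVC on x86
  // expects a 64-bit return value, so no explicit return statement is needed.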
  _asm rdtsc
#elif defined(COMPILER_MSVC) && (defined(_M_ARM64) || defined(_M_ARM64EC))
  // See https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics
  // and https://reviews.llvm.org/D53115
  int64_t virtual_timer_value;
  virtual_timer_value = _ReadStatusReg(ARM64_CNTVCT);
  return virtual_timer_value;
#elif defined(COMPILER_MSVC)
  return __rdtsc();
#elif defined(BENCHMARK_OS_NACL)
  // Native Client validator on x86/x86-64 allows RDTSC instructions,
  // and this case is handled above. Native Client validator on ARM
  // rejects MRC instructions (used in the ARM-specific sequence below),
  // so we handle it here. Portable Native Client compiles to
  // architecture-agnostic bytecode, which doesn't provide any
  // cycle counter access mnemonics.

  // Native Client does not provide any API to access the cycle counter.
  // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
  // because it provides nanosecond resolution (which is noticeable at
  // least for PNaCl modules running on x86 Mac & Linux).
  // Initialize to always return 0 if clock_gettime fails.
  struct timespec ts = {0, 0};
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
#elif defined(__aarch64__)
  // System timer of ARMv8 runs at a different frequency than the CPU's.
  // The frequency is fixed, typically in the range 1-50MHz. It can be
  // read at CNTFRQ special register. We assume the OS has set up
  // the virtual timer properly.
  int64_t virtual_timer_value;
  asm volatile("mrs %0, cntvct_el0" : "=r" (virtual_timer_value));
  return virtual_timer_value;
#elif defined(__ARM_ARCH)
  // V6 is the earliest arch that has a standard cyclecount
  // Native Client validator doesn't allow MRC instructions.
#if (__ARM_ARCH >= 6)
  uint32_t pmccntr;
  uint32_t pmuseren;
  uint32_t pmcntenset;
  // Read the user mode perf monitor counter access permissions.
  asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r" (pmuseren));
  if (pmuseren & 1) {  // Allows reading perfmon counters for user mode code.
    asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (pmcntenset));
    if (pmcntenset & 0x80000000ul) {  // Is it counting?
      asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (pmccntr));
      // The counter is set up to count every 64th cycle
      return static_cast<int64_t>(pmccntr) * 64;  // Should optimize to << 6
    }
  }
#endif
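  // User-mode access to the cycle counter is not enabled (or this is a
  // pre-v6 ARM), so fall back to gettimeofday with microsecond resolution.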
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__mips__) || defined(__m68k__)
  // MIPS apparently only allows reading the cycle counter from privileged
  // code, so we fall back to gettimeofday. It's possible clock_gettime
  // would be better.
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__loongarch__) || defined(__csky__)
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__s390__)  // Covers both s390 and s390x.
  // Return the CPU clock.
  uint64_t tsc;
#if defined(BENCHMARK_OS_ZOS)
  // z/OS HLASM syntax.
  asm(" stck %0" : "=m" (tsc) : : "cc");
#else
  // Linux on Z syntax.
  asm("stck %0" : "=Q" (tsc) : : "cc");
#endif
  return tsc;
#elif defined(__riscv)  // RISC-V
  // Use RDTIME (and RDTIMEH on riscv32).
  // RDCYCLE is a privileged instruction since Linux 6.6.
#if __riscv_xlen == 32
  uint32_t cycles_lo, cycles_hi0, cycles_hi1;
  // This asm also includes the PowerPC overflow handling strategy, as above.
  // Implemented in assembly because Clang insisted on branching.
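  // The three instructions after the reads compute, branch-free, a mask that
  // is all-ones when the two rdtimeh reads match and all-zeros otherwise,
  // then apply it to the low word (mirroring the PowerPC wrap handling).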
  asm volatile(
      "rdtimeh %0\n"
      "rdtime %1\n"
      "rdtimeh %2\n"
      "sub %0, %0, %2\n"
      "seqz %0, %0\n"
      "sub %0, zero, %0\n"
      "and %1, %1, %0\n"
      : "=r" (cycles_hi0), "=r" (cycles_lo), "=r" (cycles_hi1));
  return (static_cast<uint64_t>(cycles_hi1) << 32) | cycles_lo;
#else
  uint64_t cycles;
  asm volatile("rdtime %0" : "=r" (cycles));
  return cycles;
#endif
#elif defined(__e2k__) || defined(__elbrus__)
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__hexagon__)
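  // C15:14 reads the 64-bit processor cycle counter register pair
  // (presumably UPCYCLEHI:UPCYCLELO).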
  uint64_t pcycle;
  asm volatile("%0 = C15:14" : "=r" (pcycle));
  return static_cast<int64_t>(pcycle);
#elif defined(__alpha__)
  // Alpha has a cycle counter, the PCC register, but it is an unsigned 32-bit
  // integer and thus wraps every ~4s, making it unreliable for tick counts
  // beyond that range. The real-time clock is low-precision, roughly ~1ms,
  // but it is the only option that can reasonably count indefinitely.
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#else
  // The soft failover to a generic implementation is automatic only for ARM.
  // For other platforms the developer is expected to attempt a fast
  // implementation first and fall back to the generic version only if
  // nothing better is available.
#error You need to define CycleTimer for your OS and CPU
#endif
}
}  // end namespace cycleclock
}  // end namespace benchmark

#endif  // BENCHMARK_CYCLECLOCK_H_