/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

#include <linux/static_call.h>

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				       32
#define INTEL_PMC_MAX_FIXED				       16
#define INTEL_PMC_IDX_FIXED				       32

#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
#define ARCH_PERFMON_EVENTSEL_BR_CNTR			(1ULL << 35)
#define ARCH_PERFMON_EVENTSEL_EQ			(1ULL << 36)
#define ARCH_PERFMON_EVENTSEL_UMASK2			(0xFFULL << 40)
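/*
 * Example (illustrative only): a raw config for the architectural
 * LLC-misses event (event 0x2e, umask 0x41), counted in both user and
 * kernel mode with the counter enabled, combines the fields above:
 *
 *	u64 config = (0x2eULL & ARCH_PERFMON_EVENTSEL_EVENT) |
 *		     ((0x41ULL << 8) & ARCH_PERFMON_EVENTSEL_UMASK) |
 *		     ARCH_PERFMON_EVENTSEL_USR |
 *		     ARCH_PERFMON_EVENTSEL_OS |
 *		     ARCH_PERFMON_EVENTSEL_ENABLE;
 */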

#define INTEL_FIXED_BITS_STRIDE			4
#define INTEL_FIXED_0_KERNEL			(1ULL << 0)
#define INTEL_FIXED_0_USER			(1ULL << 1)
#define INTEL_FIXED_0_ANYTHREAD			(1ULL << 2)
#define INTEL_FIXED_0_ENABLE_PMI		(1ULL << 3)
#define INTEL_FIXED_3_METRICS_CLEAR		(1ULL << 2)

#define HSW_IN_TX				(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED			(1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE			(1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE			(1ULL << 32)

#define INTEL_FIXED_BITS_MASK					\
	(INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER |		\
	 INTEL_FIXED_0_ANYTHREAD | INTEL_FIXED_0_ENABLE_PMI |	\
	 ICL_FIXED_0_ADAPTIVE)

#define intel_fixed_bits_by_idx(_idx, _bits)			\
	((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))
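
/*
 * Illustrative use: the control bits for fixed counter 1
 * (CPU_CLK_Unhalted.Core), counting user+kernel with PMI on overflow,
 * land in that counter's nibble of MSR_ARCH_PERFMON_FIXED_CTR_CTRL
 * (defined below):
 *
 *	u64 bits = intel_fixed_bits_by_idx(1, INTEL_FIXED_0_KERNEL |
 *					      INTEL_FIXED_0_USER |
 *					      INTEL_FIXED_0_ENABLE_PMI);
 */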

#define AMD64_EVENTSEL_INT_CORE_ENABLE		(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY		(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY			(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT	37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK	\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT			48
#define AMD64_L3_SLICE_MASK			\
	(0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK			\
	(0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT			56
#define AMD64_L3_THREAD_MASK			\
	(0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK		\
	(0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES			BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES			BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT			42
#define AMD64_L3_COREID_MASK			\
	(0x7ULL << AMD64_L3_COREID_SHIFT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS			\
	(ARCH_PERFMON_EVENTSEL_EDGE |		\
	 ARCH_PERFMON_EVENTSEL_INV |		\
	 ARCH_PERFMON_EVENTSEL_CMASK |		\
	 ARCH_PERFMON_EVENTSEL_ANY |		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL |	\
	 HSW_IN_TX |				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |	\
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT        |	\
	 ARCH_PERFMON_EVENTSEL_UMASK)

#define AMD64_PERFMON_V2_EVENTSEL_EVENT_NB	\
	(AMD64_EVENTSEL_EVENT |			\
	 GENMASK_ULL(37, 36))

#define AMD64_PERFMON_V2_EVENTSEL_UMASK_NB	\
	(ARCH_PERFMON_EVENTSEL_UMASK |		\
	 GENMASK_ULL(27, 24))

#define AMD64_PERFMON_V2_RAW_EVENT_MASK_NB	\
	(AMD64_PERFMON_V2_EVENTSEL_EVENT_NB |	\
	 AMD64_PERFMON_V2_EVENTSEL_UMASK_NB)

#define AMD64_PERFMON_V2_ENABLE_UMC		BIT_ULL(31)
#define AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC	GENMASK_ULL(7, 0)
#define AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC	GENMASK_ULL(9, 8)
#define AMD64_PERFMON_V2_RAW_EVENT_MASK_UMC	\
	(AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC |	\
	 AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC)

#define AMD64_NUM_COUNTERS			4
#define AMD64_NUM_COUNTERS_CORE			6
#define AMD64_NUM_COUNTERS_NB			4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL	0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK	(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX	0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED	6
#define ARCH_PERFMON_EVENTS_COUNT		7

#define PEBS_DATACFG_MEMINFO	BIT_ULL(0)
#define PEBS_DATACFG_GP		BIT_ULL(1)
#define PEBS_DATACFG_XMMS	BIT_ULL(2)
#define PEBS_DATACFG_LBRS	BIT_ULL(3)
#define PEBS_DATACFG_CNTR	BIT_ULL(4)
#define PEBS_DATACFG_METRICS	BIT_ULL(5)
#define PEBS_DATACFG_LBR_SHIFT	24
#define PEBS_DATACFG_CNTR_SHIFT	32
#define PEBS_DATACFG_CNTR_MASK	GENMASK_ULL(15, 0)
#define PEBS_DATACFG_FIX_SHIFT	48
#define PEBS_DATACFG_FIX_MASK	GENMASK_ULL(7, 0)

/* Steal the highest bit of pebs_data_cfg for SW usage */
#define PEBS_UPDATE_DS_SW	BIT_ULL(63)

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved1:2;
		unsigned int anythread_deprecated:1;
		unsigned int reserved2:16;
	} split;
	unsigned int full;
};
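
/*
 * Minimal decode sketch (illustrative; assumes the kernel's cpuid()
 * helper):
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_ebx ebx;
 *	union cpuid10_edx edx;
 *	unsigned int ecx;
 *
 *	cpuid(0xa, &eax.full, &ebx.full, &ecx, &edx.full);
 *	// eax.split.num_counters GP counters, eax.split.bit_width bits wide;
 *	// edx.split.num_counters_fixed fixed counters (version_id > 1).
 */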

/*
 * Intel "Architectural Performance Monitoring extension" CPUID
 * detection/enumeration details:
 */
#define ARCH_PERFMON_EXT_LEAF			0x00000023
#define ARCH_PERFMON_NUM_COUNTER_LEAF		0x1
#define ARCH_PERFMON_ACR_LEAF			0x2
#define ARCH_PERFMON_PEBS_CAP_LEAF		0x4
#define ARCH_PERFMON_PEBS_COUNTER_LEAF		0x5

union cpuid35_eax {
	struct {
		unsigned int leaf0:1;
		/* Counters Sub-Leaf */
		unsigned int cntr_subleaf:1;
		/* Auto Counter Reload Sub-Leaf */
		unsigned int acr_subleaf:1;
		/* Events Sub-Leaf */
		unsigned int events_subleaf:1;
		/* arch-PEBS Sub-Leaves */
		unsigned int pebs_caps_subleaf:1;
		unsigned int pebs_cnts_subleaf:1;
		unsigned int reserved:26;
	} split;
	unsigned int full;
};

union cpuid35_ebx {
	struct {
		/* UnitMask2 Supported */
		unsigned int umask2:1;
		/* EQ-bit Supported */
		unsigned int eq:1;
		unsigned int reserved:30;
	} split;
	unsigned int full;
};

/*
 * Intel Architectural LBR CPUID detection/enumeration details:
 */
union cpuid28_eax {
	struct {
		/* Supported LBR depth values */
		unsigned int lbr_depth_mask:8;
		unsigned int reserved:22;
		/* Deep C-state Reset */
		unsigned int lbr_deep_c_reset:1;
		/* IP values contain LIP */
		unsigned int lbr_lip:1;
	} split;
	unsigned int full;
};

union cpuid28_ebx {
	struct {
		/* CPL Filtering Supported */
		unsigned int lbr_cpl:1;
		/* Branch Filtering Supported */
		unsigned int lbr_filter:1;
		/* Call-stack Mode Supported */
		unsigned int lbr_call_stack:1;
	} split;
	unsigned int full;
};

union cpuid28_ecx {
	struct {
		/* Mispredict Bit Supported */
		unsigned int lbr_mispred:1;
		/* Timed LBRs Supported */
		unsigned int lbr_timed_lbr:1;
		/* Branch Type Field Supported */
		unsigned int lbr_br_type:1;
		unsigned int reserved:13;
		/* Branch counters (Event Logging) Supported */
		unsigned int lbr_counters:4;
	} split;
	unsigned int full;
};

/*
 * AMD "Extended Performance Monitoring and Debug" CPUID
 * detection/enumeration details:
 */
union cpuid_0x80000022_ebx {
	struct {
		/* Number of Core Performance Counters */
		unsigned int num_core_pmc:4;
		/* Number of available LBR Stack Entries */
		unsigned int lbr_v2_stack_sz:6;
		/* Number of Data Fabric Counters */
		unsigned int num_df_pmc:6;
		/* Number of Unified Memory Controller Counters */
		unsigned int num_umc_pmc:6;
	} split;
	unsigned int full;
};

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
	unsigned int	pebs_ept	:1;
};

/*
 * Fixed-purpose performance events:
 */

/* RDPMC offset for Fixed PMCs */
#define INTEL_PMC_FIXED_RDPMC_BASE		(1 << 30)
#define INTEL_PMC_FIXED_RDPMC_METRICS		(1 << 29)
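
/*
 * E.g. reading fixed counter <idx> via RDPMC uses
 * ECX = INTEL_PMC_FIXED_RDPMC_BASE | idx, while the PERF_METRICS
 * pseudo-counter is read with ECX = INTEL_PMC_FIXED_RDPMC_METRICS
 * (illustrative; see the rdpmc users in the PMU code).
 */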

/*
 * All the fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * There is no event-code assigned to the fixed-mode PMCs.
 *
 * For a fixed-mode PMC, which has an equivalent event on a general-purpose
 * PMC, the event-code of the equivalent event is used for the fixed-mode PMC,
 * e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core.
 *
 * For a fixed-mode PMC, which doesn't have an equivalent event, a
 * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS.
 * The pseudo event-code for a fixed-mode PMC must be 0x00.
 * The pseudo umask-code is 0xX. The X equals the index of the fixed
 * counter + 1, e.g., the fixed counter 2 has the pseudo-encoding 0x0300.
 *
 * The counts are available in separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0		0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1		0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES		(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR2		0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES		(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES		(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR3		0x30c
#define INTEL_PMC_IDX_FIXED_SLOTS		(INTEL_PMC_IDX_FIXED + 3)
#define INTEL_PMC_MSK_FIXED_SLOTS		(1ULL << INTEL_PMC_IDX_FIXED_SLOTS)

/* TOPDOWN_BAD_SPECULATION.ALL: fixed counter 4 (Atom only) */
/* TOPDOWN_FE_BOUND.ALL: fixed counter 5 (Atom only) */
/* TOPDOWN_RETIRING.ALL: fixed counter 6 (Atom only) */

static inline bool use_fixed_pseudo_encoding(u64 code)
{
	return !(code & 0xff);
}
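
/*
 * E.g. the CPU_CLK_Unhalted.Ref pseudo-encoding 0x0300 has a zero
 * event-code and yields true here, while a regular encoding such as
 * 0x003c (unhalted core cycles on a GP counter) yields false.
 */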

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose the value 47 for the fixed index of BTS, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS		(INTEL_PMC_IDX_FIXED + 15)

/*
 * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for
 * each TopDown metric event.
 *
 * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS).
 */
#define INTEL_PMC_IDX_METRIC_BASE	(INTEL_PMC_IDX_FIXED + 16)
#define INTEL_PMC_IDX_TD_RETIRING	(INTEL_PMC_IDX_METRIC_BASE + 0)
#define INTEL_PMC_IDX_TD_BAD_SPEC	(INTEL_PMC_IDX_METRIC_BASE + 1)
#define INTEL_PMC_IDX_TD_FE_BOUND	(INTEL_PMC_IDX_METRIC_BASE + 2)
#define INTEL_PMC_IDX_TD_BE_BOUND	(INTEL_PMC_IDX_METRIC_BASE + 3)
#define INTEL_PMC_IDX_TD_HEAVY_OPS	(INTEL_PMC_IDX_METRIC_BASE + 4)
#define INTEL_PMC_IDX_TD_BR_MISPREDICT	(INTEL_PMC_IDX_METRIC_BASE + 5)
#define INTEL_PMC_IDX_TD_FETCH_LAT	(INTEL_PMC_IDX_METRIC_BASE + 6)
#define INTEL_PMC_IDX_TD_MEM_BOUND	(INTEL_PMC_IDX_METRIC_BASE + 7)
#define INTEL_PMC_IDX_METRIC_END	INTEL_PMC_IDX_TD_MEM_BOUND
#define INTEL_PMC_MSK_TOPDOWN		((0xffull << INTEL_PMC_IDX_METRIC_BASE) | \
					 INTEL_PMC_MSK_FIXED_SLOTS)

/*
 * There is no event-code assigned to the TopDown events.
 *
 * For the slots event, use the pseudo code of the fixed counter 3.
 *
 * For the metric events, the pseudo event-code is 0x00.
 * The pseudo umask-code starts from the middle of the pseudo event
 * space, 0x80.
 */
#define INTEL_TD_SLOTS			0x0400	/* TOPDOWN.SLOTS */
/* Level 1 metrics */
#define INTEL_TD_METRIC_RETIRING	0x8000	/* Retiring metric */
#define INTEL_TD_METRIC_BAD_SPEC	0x8100	/* Bad speculation metric */
#define INTEL_TD_METRIC_FE_BOUND	0x8200	/* FE bound metric */
#define INTEL_TD_METRIC_BE_BOUND	0x8300	/* BE bound metric */
/* Level 2 metrics */
#define INTEL_TD_METRIC_HEAVY_OPS	0x8400	/* Heavy Operations metric */
#define INTEL_TD_METRIC_BR_MISPREDICT	0x8500	/* Branch Mispredict metric */
#define INTEL_TD_METRIC_FETCH_LAT	0x8600	/* Fetch Latency metric */
#define INTEL_TD_METRIC_MEM_BOUND	0x8700	/* Memory bound metric */

#define INTEL_TD_METRIC_MAX		INTEL_TD_METRIC_MEM_BOUND
#define INTEL_TD_METRIC_NUM		8

#define INTEL_TD_CFG_METRIC_CLEAR_BIT	0
#define INTEL_TD_CFG_METRIC_CLEAR	BIT_ULL(INTEL_TD_CFG_METRIC_CLEAR_BIT)

static inline bool is_metric_idx(int idx)
{
	return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM;
}

static inline bool is_topdown_idx(int idx)
{
	return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS;
}
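
/*
 * E.g. is_metric_idx(INTEL_PMC_IDX_TD_RETIRING) is true, and
 * is_topdown_idx() additionally accepts INTEL_PMC_IDX_FIXED_SLOTS,
 * since the metric events are serviced by fixed counter 3 (SLOTS).
 */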

#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit)	\
			(~(0x1ull << bit) & INTEL_PMC_MSK_TOPDOWN)

#define GLOBAL_STATUS_COND_CHG			BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF_BIT		62
#define GLOBAL_STATUS_BUFFER_OVF		BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)
#define GLOBAL_STATUS_UNC_OVF			BIT_ULL(61)
#define GLOBAL_STATUS_ASIF			BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN		BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN_BIT		58
#define GLOBAL_STATUS_LBRS_FROZEN		BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT		55
#define GLOBAL_STATUS_TRACE_TOPAPMI		BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
#define GLOBAL_STATUS_ARCH_PEBS_THRESHOLD_BIT	54
#define GLOBAL_STATUS_ARCH_PEBS_THRESHOLD	BIT_ULL(GLOBAL_STATUS_ARCH_PEBS_THRESHOLD_BIT)
#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT	48

#define GLOBAL_CTRL_EN_PERF_METRICS		BIT_ULL(48)
/*
 * We model guest LBR event tracing as another fixed-mode PMC like BTS.
 *
 * We choose bit 58 because it's used to indicate LBR stack frozen state
 * for architectural perfmon v4, also we unconditionally mask that bit in
 * the handle_pmi_common(), so it'll never be set in the overflow handling.
 *
 * With this fake counter assigned, the guest LBR event user (such as KVM),
 * can program the LBR registers on its own, and we don't actually do anything
 * with them in the host context.
 */
#define INTEL_PMC_IDX_FIXED_VLBR	(GLOBAL_STATUS_LBRS_FROZEN_BIT)

/*
 * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
 * since it would claim bit 58 which is effectively Fixed26.
 */
#define INTEL_FIXED_VLBR_EVENT	0x1b00

/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
	u64 format_group:32,
	    retire_latency:16,
	    format_size:16;
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
};

struct pebs_meminfo {
	u64 address;
	u64 aux;
	union {
		/* pre Alder Lake */
		u64 mem_latency;
		/* Alder Lake and later */
		struct {
			u64 instr_latency:16;
			u64 pad2:16;
			u64 cache_latency:16;
			u64 pad3:16;
		};
	};
	u64 tsx_tuning;
};

struct pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
	u64 xmm[16*2];	/* two entries for each register */
};

struct pebs_cntr_header {
	u32 cntr;
	u32 fixed;
	u32 cntr_b;
	u32 reserved;
};

#define INTEL_CNTR_METRICS		0x3

/*
 * Arch PEBS
 */
union arch_pebs_index {
	struct {
		u64 rsvd:4,
		    wr:23,
		    rsvd2:4,
		    full:1,
		    en:1,
		    rsvd3:3,
		    thresh:23,
		    rsvd4:5;
	};
	u64 whole;
};

struct arch_pebs_header {
	union {
		u64 format;
		struct {
			u64 size:16,	/* Record size */
			    rsvd:14,
			    mode:1,	/* 64BIT_MODE */
			    cont:1,
			    rsvd2:3,
			    cntr:5,
			    lbr:2,
			    rsvd3:7,
			    xmm:1,
			    ymmh:1,
			    rsvd4:2,
			    opmask:1,
			    zmmh:1,
			    h16zmm:1,
			    rsvd5:5,
			    gpr:1,
			    aux:1,
			    basic:1;
		};
	};
	u64 rsvd6;
};

struct arch_pebs_basic {
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
	u64 retire	:16,	/* Retire Latency */
	    valid	:1,
	    rsvd	:47;
	u64 rsvd2;
	u64 rsvd3;
};

struct arch_pebs_aux {
	u64 address;
	u64 rsvd;
	u64 rsvd2;
	u64 rsvd3;
	u64 rsvd4;
	u64 aux;
	u64 instr_latency	:16,
	    pad2		:16,
	    cache_latency	:16,
	    pad3		:16;
	u64 tsx_tuning;
};

struct arch_pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15, ssp;
	u64 rsvd;
};

struct arch_pebs_xer_header {
	u64 xstate;
	u64 rsvd;
};

#define ARCH_PEBS_LBR_NAN		0x0
#define ARCH_PEBS_LBR_NUM_8		0x1
#define ARCH_PEBS_LBR_NUM_16		0x2
#define ARCH_PEBS_LBR_NUM_VAR		0x3
#define ARCH_PEBS_BASE_LBR_ENTRIES	8
struct arch_pebs_lbr_header {
	u64 rsvd;
	u64 ctl;
	u64 depth;
	u64 ler_from;
	u64 ler_to;
	u64 ler_info;
};

struct arch_pebs_cntr_header {
	u32 cntr;
	u32 fixed;
	u32 cntr_b;
	u32 rsvd;
};

/*
 * AMD Extended Performance Monitoring and Debug cpuid feature detection
 */
#define EXT_PERFMON_DEBUG_FEATURES	0x80000022

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)
#define IBS_CAPS_ZEN4			(1U<<11)
#define IBS_CAPS_OPLDLAT		(1U<<12)
#define IBS_CAPS_OPDTLBPGSIZE		(1U<<19)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)
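
/*
 * Typical capability check (sketch; get_ibs_caps() is declared below):
 *
 *	u32 caps = get_ibs_caps();
 *
 *	if (!(caps & IBS_CAPS_AVAIL))
 *		return -ENODEV;	// no IBS on this CPU
 *	if (caps & IBS_CAPS_OPCNT)
 *		...		// dispatched-op counting is supported
 */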

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_L3MISSONLY	(1ULL<<59)
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_LDLAT_EN		(1ULL<<63)
#define IBS_OP_LDLAT_THRSH	(0xFULL<<59)
#define IBS_OP_CUR_CNT		(0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND	(0x0007FULL<<32)
#define IBS_OP_CUR_CNT_EXT_MASK	(0x7FULL<<52)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_L3MISSONLY	(1ULL<<16)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_OP_MAX_CNT_EXT_MASK	(0x7FULL<<20)	/* separate upper 7 bits */
#define IBS_RIP_INVALID		(1ULL<<38)
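
/*
 * Illustrative op-sampling setup: the 16-bit IBS_OP_MAX_CNT field holds
 * the sample period divided by 16 (the hardware appends four zero bits),
 * so sampling roughly every million ops would look like:
 *
 *	u64 ctl = IBS_OP_ENABLE | ((1000000 >> 4) & IBS_OP_MAX_CNT);
 *
 * with IBS_OP_CNT_CTL set on top when IBS_CAPS_OPCNT is available and
 * dispatched-op counting is wanted (a sketch, not the driver's exact code).
 */
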
#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
extern int forward_event_to_ibs(struct perf_event *event);
#else
static inline u32 get_ibs_caps(void) { return 0; }
static inline int forward_event_to_ibs(struct perf_event *event) { return -ENOENT; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

struct pt_regs;
struct x86_perf_regs {
	struct pt_regs	regs;
	u64		*xmm_regs;
};

extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_arch_misc_flags(struct pt_regs *regs);
extern unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs);
#define perf_arch_misc_flags(regs)		perf_arch_misc_flags(regs)
#define perf_arch_guest_misc_flags(regs)	perf_arch_guest_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see
 * perf_arch_misc_flags() and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->sp = (unsigned long)__builtin_frame_address(0);	\
	(regs)->cs = __KERNEL_CS;				\
	(regs)->flags = 0;					\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

struct x86_pmu_lbr {
	unsigned int	nr;
	unsigned int	from;
	unsigned int	to;
	unsigned int	info;
	bool		has_callstack;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern u64 perf_get_hw_event_config(int hw_event);
extern void perf_check_microcode(void);
extern void perf_clear_dirty_counters(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline u64 perf_get_hw_event_config(int hw_event)
{
	return 0;
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
static inline void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
	memset(lbr, 0, sizeof(*lbr));
}
#endif

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{

}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);

#if defined(CONFIG_PERF_EVENTS_AMD_BRS)

#define PERF_NEEDS_LOPWR_CB 1

/*
 * architectural low power callback impacts
 * drivers/acpi/processor_idle.c
 * drivers/acpi/acpi_pad.c
 */
extern void perf_amd_brs_lopwr_cb(bool lopwr_in);

DECLARE_STATIC_CALL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);

static __always_inline void perf_lopwr_cb(bool lopwr_in)
{
	static_call_mod(perf_lopwr_cb)(lopwr_in);
}

#endif /* PERF_NEEDS_LOPWR_CB */

#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */