/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

#include <linux/static_call.h>

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC 32
#define INTEL_PMC_MAX_FIXED 16
#define INTEL_PMC_IDX_FIXED 32

#define X86_PMC_IDX_MAX 64

#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
#define MSR_ARCH_PERFMON_PERFCTR1 0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
#define MSR_ARCH_PERFMON_EVENTSEL1 0x187

#define ARCH_PERFMON_EVENTSEL_EVENT 0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK 0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR (1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE (1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL (1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT (1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY (1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
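
/*
 * For example (an illustrative composition, not a definition used below),
 * counting the architectural "LLC Misses" event (event=0x2e, umask=0x41)
 * in both user and kernel mode, with the counter enabled and the overflow
 * interrupt armed, corresponds to an event-select value of:
 *
 *    (0x2e & ARCH_PERFMON_EVENTSEL_EVENT)
 *    | ((0x41ULL << 8) & ARCH_PERFMON_EVENTSEL_UMASK)
 *    | ARCH_PERFMON_EVENTSEL_USR
 *    | ARCH_PERFMON_EVENTSEL_OS
 *    | ARCH_PERFMON_EVENTSEL_INT
 *    | ARCH_PERFMON_EVENTSEL_ENABLE
 */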

#define INTEL_FIXED_BITS_MASK 0xFULL
#define INTEL_FIXED_BITS_STRIDE 4
#define INTEL_FIXED_0_KERNEL (1ULL << 0)
#define INTEL_FIXED_0_USER (1ULL << 1)
#define INTEL_FIXED_0_ANYTHREAD (1ULL << 2)
#define INTEL_FIXED_0_ENABLE_PMI (1ULL << 3)

#define HSW_IN_TX (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED (1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE (1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE (1ULL << 32)

#define intel_fixed_bits_by_idx(_idx, _bits) \
        ((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))
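
/*
 * E.g. (a sketch, not used below): enabling fixed counter 1 for both
 * rings with a PMI on overflow corresponds to setting
 *
 *    intel_fixed_bits_by_idx(1, INTEL_FIXED_0_KERNEL |
 *                               INTEL_FIXED_0_USER   |
 *                               INTEL_FIXED_0_ENABLE_PMI)
 *
 * i.e. bits within the 4-bit control field for counter 1 (bits 4-7) of
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL, which is defined further down.
 */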

#define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY (1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY (1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT 37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK \
        (0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT \
        (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK \
        (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT 48
#define AMD64_L3_SLICE_MASK \
        (0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK \
        (0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT 56
#define AMD64_L3_THREAD_MASK \
        (0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK \
        (0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT 42
#define AMD64_L3_COREID_MASK \
        (0x7ULL << AMD64_L3_COREID_SHIFT)

#define X86_RAW_EVENT_MASK \
        (ARCH_PERFMON_EVENTSEL_EVENT | \
         ARCH_PERFMON_EVENTSEL_UMASK | \
         ARCH_PERFMON_EVENTSEL_EDGE | \
         ARCH_PERFMON_EVENTSEL_INV | \
         ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS \
        (ARCH_PERFMON_EVENTSEL_EDGE | \
         ARCH_PERFMON_EVENTSEL_INV | \
         ARCH_PERFMON_EVENTSEL_CMASK | \
         ARCH_PERFMON_EVENTSEL_ANY | \
         ARCH_PERFMON_EVENTSEL_PIN_CONTROL | \
         HSW_IN_TX | \
         HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK \
        (X86_RAW_EVENT_MASK | \
         AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB \
        (AMD64_EVENTSEL_EVENT | \
         ARCH_PERFMON_EVENTSEL_UMASK)

#define AMD64_PERFMON_V2_EVENTSEL_EVENT_NB \
        (AMD64_EVENTSEL_EVENT | \
         GENMASK_ULL(37, 36))

#define AMD64_PERFMON_V2_EVENTSEL_UMASK_NB \
        (ARCH_PERFMON_EVENTSEL_UMASK | \
         GENMASK_ULL(27, 24))

#define AMD64_PERFMON_V2_RAW_EVENT_MASK_NB \
        (AMD64_PERFMON_V2_EVENTSEL_EVENT_NB | \
         AMD64_PERFMON_V2_EVENTSEL_UMASK_NB)

#define AMD64_PERFMON_V2_ENABLE_UMC BIT_ULL(31)
#define AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC GENMASK_ULL(7, 0)
#define AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC GENMASK_ULL(9, 8)
#define AMD64_PERFMON_V2_RAW_EVENT_MASK_UMC \
        (AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC | \
         AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC)

#define AMD64_NUM_COUNTERS 4
#define AMD64_NUM_COUNTERS_CORE 6
#define AMD64_NUM_COUNTERS_NB 4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
        (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6
#define ARCH_PERFMON_EVENTS_COUNT 7
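
/*
 * CPUID leaf 0xa reports *unavailable* architectural events as set bits
 * in EBX (see union cpuid10_ebx below), so presence is tested against the
 * inverted mask. A rough sketch, using the kernel's cpuid_ebx() helper:
 *
 *    union cpuid10_ebx ebx;
 *    unsigned int avail;
 *
 *    ebx.full = cpuid_ebx(0xa);
 *    avail = ~ebx.full & ((1 << ARCH_PERFMON_EVENTS_COUNT) - 1);
 *
 * A set ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT bit in 'avail' then
 * means the architectural core-cycles event is implemented.
 */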

#define PEBS_DATACFG_MEMINFO BIT_ULL(0)
#define PEBS_DATACFG_GP BIT_ULL(1)
#define PEBS_DATACFG_XMMS BIT_ULL(2)
#define PEBS_DATACFG_LBRS BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT 24

/* Steal the highest bit of pebs_data_cfg for SW usage */
#define PEBS_UPDATE_DS_SW BIT_ULL(63)
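
/*
 * Illustrative example: a PEBS data configuration requesting the memory
 * info group, the GPR group and an LBR block of n entries would be built
 * roughly as
 *
 *    PEBS_DATACFG_MEMINFO | PEBS_DATACFG_GP | PEBS_DATACFG_LBRS |
 *    ((n - 1ULL) << PEBS_DATACFG_LBR_SHIFT)
 *
 * (assuming, as the PEBS code does, that the LBR-count field holds the
 * number of entries minus one).
 */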

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
        struct {
                unsigned int version_id:8;
                unsigned int num_counters:8;
                unsigned int bit_width:8;
                unsigned int mask_length:8;
        } split;
        unsigned int full;
};

union cpuid10_ebx {
        struct {
                unsigned int no_unhalted_core_cycles:1;
                unsigned int no_instructions_retired:1;
                unsigned int no_unhalted_reference_cycles:1;
                unsigned int no_llc_reference:1;
                unsigned int no_llc_misses:1;
                unsigned int no_branch_instruction_retired:1;
                unsigned int no_branch_misses_retired:1;
        } split;
        unsigned int full;
};

union cpuid10_edx {
        struct {
                unsigned int num_counters_fixed:5;
                unsigned int bit_width_fixed:8;
                unsigned int reserved1:2;
                unsigned int anythread_deprecated:1;
                unsigned int reserved2:16;
        } split;
        unsigned int full;
};
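
/*
 * A minimal sketch of how these unions are meant to be filled in (the
 * real consumer is the PMU init code), using the kernel's cpuid() helper:
 *
 *    union cpuid10_eax eax;
 *    union cpuid10_ebx ebx;
 *    union cpuid10_edx edx;
 *    unsigned int ecx;
 *
 *    cpuid(0xa, &eax.full, &ebx.full, &ecx, &edx.full);
 *    if (eax.split.version_id >= 2)
 *            nr_fixed = edx.split.num_counters_fixed;
 *
 * 'nr_fixed' is only a local in this sketch, not something defined here.
 */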

/*
 * Intel "Architectural Performance Monitoring extension" CPUID
 * detection/enumeration details:
 */
#define ARCH_PERFMON_EXT_LEAF 0x00000023
#define ARCH_PERFMON_NUM_COUNTER_LEAF_BIT 0x1
#define ARCH_PERFMON_NUM_COUNTER_LEAF 0x1

/*
 * Intel Architectural LBR CPUID detection/enumeration details:
 */
union cpuid28_eax {
        struct {
                /* Supported LBR depth values */
                unsigned int lbr_depth_mask:8;
                unsigned int reserved:22;
                /* Deep C-state Reset */
                unsigned int lbr_deep_c_reset:1;
                /* IP values contain LIP */
                unsigned int lbr_lip:1;
        } split;
        unsigned int full;
};

union cpuid28_ebx {
        struct {
                /* CPL Filtering Supported */
                unsigned int lbr_cpl:1;
                /* Branch Filtering Supported */
                unsigned int lbr_filter:1;
                /* Call-stack Mode Supported */
                unsigned int lbr_call_stack:1;
        } split;
        unsigned int full;
};

union cpuid28_ecx {
        struct {
                /* Mispredict Bit Supported */
                unsigned int lbr_mispred:1;
                /* Timed LBRs Supported */
                unsigned int lbr_timed_lbr:1;
                /* Branch Type Field Supported */
                unsigned int lbr_br_type:1;
        } split;
        unsigned int full;
};
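
/*
 * Sketch of the intended use (CPUID.(EAX=0x1c, ECX=0); see the arch LBR
 * init code for the real thing):
 *
 *    union cpuid28_eax eax;
 *    union cpuid28_ebx ebx;
 *    union cpuid28_ecx ecx;
 *    unsigned int edx;
 *
 *    cpuid(0x1c, &eax.full, &ebx.full, &ecx.full, &edx);
 *
 * Bit n set in eax.split.lbr_depth_mask means an LBR stack depth of
 * 8 * (n + 1) entries is supported.
 */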

/*
 * AMD "Extended Performance Monitoring and Debug" CPUID
 * detection/enumeration details:
 */
union cpuid_0x80000022_ebx {
        struct {
                /* Number of Core Performance Counters */
                unsigned int num_core_pmc:4;
                /* Number of available LBR Stack Entries */
                unsigned int lbr_v2_stack_sz:6;
                /* Number of Data Fabric Counters */
                unsigned int num_df_pmc:6;
                /* Number of Unified Memory Controller Counters */
                unsigned int num_umc_pmc:6;
        } split;
        unsigned int full;
};
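
/*
 * Illustrative sketch of how this union is consumed (the leaf number is
 * EXT_PERFMON_DEBUG_FEATURES, defined further down in this header):
 *
 *    union cpuid_0x80000022_ebx ebx;
 *
 *    ebx.full = cpuid_ebx(EXT_PERFMON_DEBUG_FEATURES);
 *    nr_core_counters = ebx.split.num_core_pmc;
 *
 * 'nr_core_counters' is only a local in this sketch.
 */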

struct x86_pmu_capability {
        int version;
        int num_counters_gp;
        int num_counters_fixed;
        int bit_width_gp;
        int bit_width_fixed;
        unsigned int events_mask;
        int events_mask_len;
        unsigned int pebs_ept :1;
};

/*
 * Fixed-purpose performance events:
 */

/* RDPMC offset for Fixed PMCs */
#define INTEL_PMC_FIXED_RDPMC_BASE (1 << 30)
#define INTEL_PMC_FIXED_RDPMC_METRICS (1 << 29)
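
/*
 * I.e. the RDPMC index of fixed counter <n> is
 * (INTEL_PMC_FIXED_RDPMC_BASE | n); for example, executing RDPMC with
 * ECX = 0x40000000 reads fixed counter 0 (Instr_Retired.Any), assuming
 * RDPMC access is permitted.
 */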

/*
 * All the fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d

/*
 * There is no event-code assigned to the fixed-mode PMCs.
 *
 * For a fixed-mode PMC that has an equivalent event on a general-purpose
 * PMC, the event-code of the equivalent event is used for the fixed-mode
 * PMC, e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core.
 *
 * For a fixed-mode PMC that doesn't have an equivalent event, a
 * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS.
 * The pseudo event-code for a fixed-mode PMC must be 0x00.
 * The pseudo umask-code is 0xX, where X equals the index of the fixed
 * counter + 1, e.g., fixed counter 2 has the pseudo-encoding 0x0300.
 *
 * The counts are available in separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS (INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR3 0x30c
#define INTEL_PMC_IDX_FIXED_SLOTS (INTEL_PMC_IDX_FIXED + 3)
#define INTEL_PMC_MSK_FIXED_SLOTS (1ULL << INTEL_PMC_IDX_FIXED_SLOTS)

static inline bool use_fixed_pseudo_encoding(u64 code)
{
        return !(code & 0xff);
}
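
/*
 * E.g. use_fixed_pseudo_encoding(0x0300) is true: 0x0300 is the
 * pseudo-encoding of CPU_CLK_Unhalted.Ref on fixed counter 2 (event=0x00,
 * umask=0x03). An encoding with a real event-code, such as 0x003c
 * (unhalted core cycles on a general-purpose counter), is not a
 * pseudo-encoding and the helper returns false.
 */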

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose the value 47 for the fixed index of BTS, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 15)

/*
 * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for
 * each TopDown metric event.
 *
 * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS).
 */
#define INTEL_PMC_IDX_METRIC_BASE (INTEL_PMC_IDX_FIXED + 16)
#define INTEL_PMC_IDX_TD_RETIRING (INTEL_PMC_IDX_METRIC_BASE + 0)
#define INTEL_PMC_IDX_TD_BAD_SPEC (INTEL_PMC_IDX_METRIC_BASE + 1)
#define INTEL_PMC_IDX_TD_FE_BOUND (INTEL_PMC_IDX_METRIC_BASE + 2)
#define INTEL_PMC_IDX_TD_BE_BOUND (INTEL_PMC_IDX_METRIC_BASE + 3)
#define INTEL_PMC_IDX_TD_HEAVY_OPS (INTEL_PMC_IDX_METRIC_BASE + 4)
#define INTEL_PMC_IDX_TD_BR_MISPREDICT (INTEL_PMC_IDX_METRIC_BASE + 5)
#define INTEL_PMC_IDX_TD_FETCH_LAT (INTEL_PMC_IDX_METRIC_BASE + 6)
#define INTEL_PMC_IDX_TD_MEM_BOUND (INTEL_PMC_IDX_METRIC_BASE + 7)
#define INTEL_PMC_IDX_METRIC_END INTEL_PMC_IDX_TD_MEM_BOUND
#define INTEL_PMC_MSK_TOPDOWN ((0xffull << INTEL_PMC_IDX_METRIC_BASE) | \
                                INTEL_PMC_MSK_FIXED_SLOTS)

/*
 * There is no event-code assigned to the TopDown events.
 *
 * For the slots event, use the pseudo code of the fixed counter 3.
 *
 * For the metric events, the pseudo event-code is 0x00.
 * The pseudo umask-code starts from the middle of the pseudo event
 * space, 0x80.
 */
#define INTEL_TD_SLOTS 0x0400 /* TOPDOWN.SLOTS */
/* Level 1 metrics */
#define INTEL_TD_METRIC_RETIRING 0x8000 /* Retiring metric */
#define INTEL_TD_METRIC_BAD_SPEC 0x8100 /* Bad speculation metric */
#define INTEL_TD_METRIC_FE_BOUND 0x8200 /* FE bound metric */
#define INTEL_TD_METRIC_BE_BOUND 0x8300 /* BE bound metric */
/* Level 2 metrics */
#define INTEL_TD_METRIC_HEAVY_OPS 0x8400 /* Heavy Operations metric */
#define INTEL_TD_METRIC_BR_MISPREDICT 0x8500 /* Branch Mispredict metric */
#define INTEL_TD_METRIC_FETCH_LAT 0x8600 /* Fetch Latency metric */
#define INTEL_TD_METRIC_MEM_BOUND 0x8700 /* Memory bound metric */

#define INTEL_TD_METRIC_MAX INTEL_TD_METRIC_MEM_BOUND
#define INTEL_TD_METRIC_NUM 8
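
/*
 * The metric index for a given pseudo-encoding follows directly from the
 * definitions above (a sketch, not a helper defined here):
 *
 *    idx = INTEL_PMC_IDX_METRIC_BASE +
 *          ((config - INTEL_TD_METRIC_RETIRING) >> 8);
 *
 * e.g. INTEL_TD_METRIC_BE_BOUND (0x8300) maps to INTEL_PMC_IDX_TD_BE_BOUND.
 */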

static inline bool is_metric_idx(int idx)
{
        return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM;
}

static inline bool is_topdown_idx(int idx)
{
        return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS;
}

#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit) \
        (~(0x1ull << bit) & INTEL_PMC_MSK_TOPDOWN)

#define GLOBAL_STATUS_COND_CHG BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF_BIT 62
#define GLOBAL_STATUS_BUFFER_OVF BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)
#define GLOBAL_STATUS_UNC_OVF BIT_ULL(61)
#define GLOBAL_STATUS_ASIF BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN_BIT 58
#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT 55
#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT 48

#define GLOBAL_CTRL_EN_PERF_METRICS 48
/*
 * We model guest LBR event tracing as another fixed-mode PMC like BTS.
 *
 * We choose bit 58 because it's used to indicate LBR stack frozen state
 * for architectural perfmon v4, and we unconditionally mask that bit in
 * handle_pmi_common(), so it'll never be set in the overflow handling.
 *
 * With this fake counter assigned, a guest LBR event user (such as KVM)
 * can program the LBR registers on its own, and we don't actually do
 * anything with them in the host context.
 */
#define INTEL_PMC_IDX_FIXED_VLBR (GLOBAL_STATUS_LBRS_FROZEN_BIT)

/*
 * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
 * since it would claim bit 58 which is effectively Fixed26.
 */
#define INTEL_FIXED_VLBR_EVENT 0x1b00

/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
        u64 format_size;
        u64 ip;
        u64 applicable_counters;
        u64 tsc;
};

struct pebs_meminfo {
        u64 address;
        u64 aux;
        u64 latency;
        u64 tsx_tuning;
};

struct pebs_gprs {
        u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
        u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
        u64 xmm[16*2]; /* two entries for each register */
};
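
/*
 * An adaptive PEBS record always starts with the basic group; the optional
 * groups selected via the PEBS_DATACFG_* bits follow in the same order as
 * those bits: meminfo, GPRs, XMMs, then the LBR entries. A rough sketch of
 * walking a single record under that assumption:
 *
 *    struct pebs_basic *basic = rec;
 *    void *next = basic + 1;
 *
 *    if (cfg & PEBS_DATACFG_MEMINFO)
 *            next = (struct pebs_meminfo *)next + 1;
 *    if (cfg & PEBS_DATACFG_GP)
 *            next = (struct pebs_gprs *)next + 1;
 *    if (cfg & PEBS_DATACFG_XMMS)
 *            next = (struct pebs_xmm *)next + 1;
 *
 * with the LBR entries, if requested, coming last.
 */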

/*
 * AMD Extended Performance Monitoring and Debug cpuid feature detection
 */
#define EXT_PERFMON_DEBUG_FEATURES 0x80000022

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES 0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL (1U<<0)
#define IBS_CAPS_FETCHSAM (1U<<1)
#define IBS_CAPS_OPSAM (1U<<2)
#define IBS_CAPS_RDWROPCNT (1U<<3)
#define IBS_CAPS_OPCNT (1U<<4)
#define IBS_CAPS_BRNTRGT (1U<<5)
#define IBS_CAPS_OPCNTEXT (1U<<6)
#define IBS_CAPS_RIPINVALIDCHK (1U<<7)
#define IBS_CAPS_OPBRNFUSE (1U<<8)
#define IBS_CAPS_FETCHCTLEXTD (1U<<9)
#define IBS_CAPS_OPDATA4 (1U<<10)
#define IBS_CAPS_ZEN4 (1U<<11)

#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
                         | IBS_CAPS_FETCHSAM \
                         | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL 0x1cc
#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK 0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_L3MISSONLY (1ULL<<59)
#define IBS_FETCH_RAND_EN (1ULL<<57)
#define IBS_FETCH_VAL (1ULL<<49)
#define IBS_FETCH_ENABLE (1ULL<<48)
#define IBS_FETCH_CNT 0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT 0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
#define IBS_OP_CNT_CTL (1ULL<<19)
#define IBS_OP_VAL (1ULL<<18)
#define IBS_OP_ENABLE (1ULL<<17)
#define IBS_OP_L3MISSONLY (1ULL<<16)
#define IBS_OP_MAX_CNT 0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */
#define IBS_OP_MAX_CNT_EXT_MASK (0x7FULL<<20) /* separate upper 7 bits */
#define IBS_RIP_INVALID (1ULL<<38)
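
/*
 * Illustrative example (not the in-tree driver): enabling periodic IBS op
 * sampling with a period of 0x10000 ops, counting dispatched ops rather
 * than clock cycles, would program the IBS op control MSR roughly as
 *
 *    ((0x10000 >> 4) & IBS_OP_MAX_CNT) | IBS_OP_CNT_CTL | IBS_OP_ENABLE
 *
 * the shift by 4 reflecting that the max-count field is specified in
 * units of 16 ops.
 */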

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
extern int forward_event_to_ibs(struct perf_event *event);
#else
static inline u32 get_ibs_caps(void) { return 0; }
static inline int forward_event_to_ibs(struct perf_event *event) { return -ENOENT; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT (1UL << 3)
#define PERF_EFLAGS_VM (1UL << 5)

struct pt_regs;
struct x86_perf_regs {
        struct pt_regs regs;
        u64 *xmm_regs;
};

extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 of the flags register to pass the exact-IP information,
 * see perf_misc_flags() and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip) { \
        (regs)->ip = (__ip); \
        (regs)->sp = (unsigned long)__builtin_frame_address(0); \
        (regs)->cs = __KERNEL_CS; \
        (regs)->flags = 0; \
}

struct perf_guest_switch_msr {
        unsigned msr;
        u64 host, guest;
};

struct x86_pmu_lbr {
        unsigned int nr;
        unsigned int from;
        unsigned int to;
        unsigned int info;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern u64 perf_get_hw_event_config(int hw_event);
extern void perf_check_microcode(void);
extern void perf_clear_dirty_counters(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
        memset(cap, 0, sizeof(*cap));
}

static inline u64 perf_get_hw_event_config(int hw_event)
{
        return 0;
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
static inline void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
        memset(lbr, 0, sizeof(*lbr));
}
#endif

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{
}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);

#if defined(CONFIG_PERF_EVENTS_AMD_BRS)

#define PERF_NEEDS_LOPWR_CB 1

/*
 * architectural low power callback impacts
 * drivers/acpi/processor_idle.c
 * drivers/acpi/acpi_pad.c
 */
extern void perf_amd_brs_lopwr_cb(bool lopwr_in);

DECLARE_STATIC_CALL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);

static __always_inline void perf_lopwr_cb(bool lopwr_in)
{
        static_call_mod(perf_lopwr_cb)(lopwr_in);
}

#endif /* CONFIG_PERF_EVENTS_AMD_BRS */

#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */