// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"

/*
 * Perf's "BASE" is wildly misleading, architectural PMUs use bits 31:16 of ECX
 * to encode the "type" of counter to read, i.e. this is not a "base".  And to
 * further confuse things, non-architectural PMUs use bit 31 as a flag for
 * "fast" reads, whereas the "type" is an explicit value.
 */
#define INTEL_RDPMC_GP		0
#define INTEL_RDPMC_FIXED	INTEL_PMC_FIXED_RDPMC_BASE

#define INTEL_RDPMC_TYPE_MASK	GENMASK(31, 16)
#define INTEL_RDPMC_INDEX_MASK	GENMASK(15, 0)
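/*
 * e.g. on an architectural PMU, RDPMC with ECX = 0x40000001 (type =
 * INTEL_RDPMC_FIXED, index = 1) reads fixed counter 1, while ECX = 0x1
 * (type = INTEL_RDPMC_GP) reads general purpose counter 1.
 */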

#define MSR_PMC_FULL_WIDTH_BIT      (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)

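/*
 * Update the fixed counter control state and reprogram any fixed counter
 * whose per-counter control field (enable bits, PMI bit, etc.) changed.
 */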
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	struct kvm_pmc *pmc;
	u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
	int i;

	pmu->fixed_ctr_ctrl = data;
	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(old_fixed_ctr_ctrl, i);

		if (old_ctrl == new_ctrl)
			continue;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		__set_bit(KVM_FIXED_PMC_BASE_IDX + i, pmu->pmc_in_use);
		kvm_pmu_request_counter_reprogram(pmc);
	}
}

static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					      unsigned int idx, u64 *mask)
{
	unsigned int type = idx & INTEL_RDPMC_TYPE_MASK;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;
	unsigned int num_counters;
	u64 bitmask;

	/*
	 * The encoding of ECX for RDPMC is different for architectural versus
	 * non-architectural PMUs (PMUs with version '0').  For architectural
	 * PMUs, bits 31:16 specify the PMC type and bits 15:0 specify the PMC
	 * index.  For non-architectural PMUs, bit 31 is a "fast" flag, and
	 * bits 30:0 specify the PMC index.
	 *
	 * Yell and reject attempts to read PMCs for a non-architectural PMU,
	 * as KVM doesn't support such PMUs.
	 */
	if (WARN_ON_ONCE(!pmu->version))
		return NULL;

	/*
	 * General Purpose (GP) PMCs are supported on all PMUs, and fixed PMCs
	 * are supported on all architectural PMUs, i.e. on all virtual PMUs
	 * supported by KVM.  Note, KVM only emulates fixed PMCs for PMU v2+,
	 * but the type itself is still valid, i.e. let RDPMC fail due to
	 * accessing a non-existent counter.  Reject attempts to read all
	 * other types, which are unknown/unsupported.
	 */
	switch (type) {
	case INTEL_RDPMC_FIXED:
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
		bitmask = pmu->counter_bitmask[KVM_PMC_FIXED];
		break;
	case INTEL_RDPMC_GP:
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
		bitmask = pmu->counter_bitmask[KVM_PMC_GP];
		break;
	default:
		return NULL;
	}

	idx &= INTEL_RDPMC_INDEX_MASK;
	if (idx >= num_counters)
		return NULL;

	*mask &= bitmask;
	return &counters[array_index_nospec(idx, num_counters)];
}

static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
{
	if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
		return 0;

	return vcpu->arch.perf_capabilities;
}

static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
}

static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{
	if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
		return NULL;

	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}

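/*
 * An MSR is a valid LBR MSR iff vLBR is enabled for the vCPU and the MSR is
 * LBR_SELECT, LBR_TOS, or falls in one of the FROM/TO/INFO ranges described
 * by the vCPU's LBR records.
 */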
static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
{
	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
	bool ret = false;

	if (!intel_pmu_lbr_is_enabled(vcpu))
		return ret;

	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
	      (index >= records->from && index < records->from + records->nr) ||
	      (index >= records->to && index < records->to + records->nr);

	if (!ret && records->info)
		ret = (index >= records->info && index < records->info + records->nr);

	return ret;
}

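/*
 * Check whether an MSR is a PMU MSR the guest can access, based on the
 * vCPU's CPUID-derived PMU model rather than on raw host support.
 */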
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u64 perf_capabilities;
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		return kvm_pmu_has_perf_global_ctrl(pmu);
	case MSR_IA32_PEBS_ENABLE:
		ret = vcpu_get_perf_capabilities(vcpu) & PERF_CAP_PEBS_FORMAT;
		break;
	case MSR_IA32_DS_AREA:
		ret = guest_cpuid_has(vcpu, X86_FEATURE_DS);
		break;
	case MSR_PEBS_DATA_CFG:
		perf_capabilities = vcpu_get_perf_capabilities(vcpu);
		ret = (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
			((perf_capabilities & PERF_CAP_PEBS_FORMAT) > 3);
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
			intel_pmu_is_valid_lbr_msr(vcpu, msr);
		break;
	}

	return ret;
}

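/*
 * Map an MSR index to its backing PMC: fixed counters first, then GP event
 * selectors, then GP counters.
 */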
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}

static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (lbr_desc->event) {
		perf_event_release_kernel(lbr_desc->event);
		lbr_desc->event = NULL;
		vcpu_to_pmu(vcpu)->event_count--;
	}
}

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct perf_event *event;

	/*
	 * The perf_event_attr is constructed in the minimal efficient way:
	 * - set 'pinned = true' to make it task pinned so that if another
	 *   cpu pinned event reclaims LBR, the event->oncpu will be set to -1;
	 * - set '.exclude_host = true' to record only guest branch behavior;
	 *
	 * - set '.config = INTEL_FIXED_VLBR_EVENT' to tell host perf to
	 *   schedule the event without a real HW counter but with a fake one;
	 *   check is_guest_lbr_event() and __intel_get_event_constraints();
	 *
	 * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and
	 *   'branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
	 *   PERF_SAMPLE_BRANCH_USER' to configure it as an LBR callstack
	 *   event, which helps KVM save/restore guest LBR records during
	 *   host context switches and avoids quite a lot of overhead; check
	 *   branch_user_callstack() and intel_pmu_lbr_sched_task();
	 */
	struct perf_event_attr attr = {
		.type = PERF_TYPE_RAW,
		.size = sizeof(attr),
		.config = INTEL_FIXED_VLBR_EVENT,
		.sample_type = PERF_SAMPLE_BRANCH_STACK,
		.pinned = true,
		.exclude_host = true,
		.branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
				      PERF_SAMPLE_BRANCH_USER,
	};

	if (unlikely(lbr_desc->event)) {
		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
		return 0;
	}

	event = perf_event_create_kernel_counter(&attr, -1,
						 current, NULL, NULL);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("%s: failed %ld\n",
				     __func__, PTR_ERR(event));
		return PTR_ERR(event);
	}
	lbr_desc->event = event;
	pmu->event_count++;
	__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
	return 0;
}

/*
 * It's safe for the guest to access the LBR MSRs when they are not passed
 * through, since the host will restore or reset the LBR MSR records when
 * the guest LBR event is scheduled in.
 */
static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
					     struct msr_data *msr_info, bool read)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	u32 index = msr_info->index;

	if (!intel_pmu_is_valid_lbr_msr(vcpu, index))
		return false;

	if (!lbr_desc->event && intel_pmu_create_guest_lbr_event(vcpu) < 0)
		goto dummy;

	/*
	 * Disable IRQs to ensure the LBR feature doesn't get reclaimed by the
	 * host at the time the value is read from the MSR, which avoids
	 * leaking host LBR values to the guest.  If LBRs have been reclaimed,
	 * return 0 on guest reads.
	 */
	local_irq_disable();
	if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) {
		if (read)
			rdmsrl(index, msr_info->data);
		else
			wrmsrl(index, msr_info->data);
		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
		local_irq_enable();
		return true;
	}
	clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
	local_irq_enable();

dummy:
	if (read)
		msr_info->data = 0;
	return true;
}

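/*
 * Read a PMU MSR on behalf of the guest; a non-zero return tells common
 * code the MSR isn't a (supported) PMU MSR, i.e. signals a #GP.
 */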
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		msr_info->data = pmu->fixed_ctr_ctrl;
		break;
	case MSR_IA32_PEBS_ENABLE:
		msr_info->data = pmu->pebs_enable;
		break;
	case MSR_IA32_DS_AREA:
		msr_info->data = pmu->ds_area;
		break;
	case MSR_PEBS_DATA_CFG:
		msr_info->data = pmu->pebs_data_cfg;
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_GP];
			break;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_FIXED];
			break;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			msr_info->data = pmc->eventsel;
			break;
		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) {
			break;
		}
		return 1;
	}

	return 0;
}

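/*
 * Write a PMU MSR, rejecting (non-zero return, i.e. #GP) values that set
 * bits reserved for the vCPU's PMU model.
 */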
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;
	u64 reserved_bits, diff;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (data & pmu->fixed_ctr_ctrl_mask)
			return 1;

		if (pmu->fixed_ctr_ctrl != data)
			reprogram_fixed_counters(pmu, data);
		break;
	case MSR_IA32_PEBS_ENABLE:
		if (data & pmu->pebs_enable_mask)
			return 1;

		if (pmu->pebs_enable != data) {
			diff = pmu->pebs_enable ^ data;
			pmu->pebs_enable = data;
			reprogram_counters(pmu, diff);
		}
		break;
	case MSR_IA32_DS_AREA:
		if (is_noncanonical_address(data, vcpu))
			return 1;

		pmu->ds_area = data;
		break;
	case MSR_PEBS_DATA_CFG:
		if (data & pmu->pebs_data_cfg_mask)
			return 1;

		pmu->pebs_data_cfg = data;
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
			    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
				return 1;

			if (!msr_info->host_initiated &&
			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
				data = (s64)(s32)data;
			pmc_write_counter(pmc, data);
			break;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc_write_counter(pmc, data);
			break;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			reserved_bits = pmu->reserved_bits;
			if ((pmc->idx == 2) &&
			    (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
				reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
			if (data & reserved_bits)
				return 1;

			if (data != pmc->eventsel) {
				pmc->eventsel = data;
				kvm_pmu_request_counter_reprogram(pmc);
			}
			break;
		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) {
			break;
		}
		/* Not a known PMU MSR. */
		return 1;
	}

	return 0;
}

/*
 * Map fixed counter events to architectural general purpose event encodings.
 * Perf doesn't provide APIs to allow KVM to directly program a fixed counter,
 * and so KVM instead programs the architectural event to effectively request
 * the fixed counter.  Perf isn't guaranteed to use a fixed counter and may
 * instead program the encoding into a general purpose counter, e.g. if a
 * different perf_event is already utilizing the requested counter, but the
 * end result is the same (ignoring the fact that using a general purpose
 * counter will likely exacerbate counter contention).
 *
 * Forcibly inlined to allow asserting on @index at build time, and there
 * should never be more than one user.
 */
static __always_inline u64 intel_get_fixed_pmc_eventsel(unsigned int index)
{
	const enum perf_hw_id fixed_pmc_perf_ids[] = {
		[0] = PERF_COUNT_HW_INSTRUCTIONS,
		[1] = PERF_COUNT_HW_CPU_CYCLES,
		[2] = PERF_COUNT_HW_REF_CPU_CYCLES,
	};
	u64 eventsel;

	BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_perf_ids) != KVM_PMC_MAX_FIXED);
	BUILD_BUG_ON(index >= KVM_PMC_MAX_FIXED);

	/*
	 * Yell if perf reports support for a fixed counter but perf doesn't
	 * have a known encoding for the associated general purpose event.
	 */
	eventsel = perf_get_hw_event_config(fixed_pmc_perf_ids[index]);
	WARN_ON_ONCE(!eventsel && index < kvm_pmu_cap.num_counters_fixed);
	return eventsel;
}

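/*
 * (Re)build the vPMU model from the vCPU's CPUID: leaf 0xA bounds the GP
 * and fixed counter counts and widths (further capped by the host's
 * kvm_pmu_cap), and PERF_CAPABILITIES gates the LBR and PEBS state.
 */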
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;
	u64 perf_capabilities;
	u64 counter_mask;
	int i;

	memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));

	/*
	 * Setting passthrough of LBR MSRs is done only in the VM-Entry loop,
	 * and PMU refresh is disallowed after the vCPU has run, i.e. this code
	 * should never be reached while KVM is passing through MSRs.
	 */
	if (KVM_BUG_ON(lbr_desc->msr_passthrough, vcpu->kvm))
		return;

	entry = kvm_find_cpuid_entry(vcpu, 0xa);
	if (!entry)
		return;

	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 kvm_pmu_cap.num_counters_gp);
	eax.split.bit_width = min_t(int, eax.split.bit_width,
				    kvm_pmu_cap.bit_width_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	eax.split.mask_length = min_t(int, eax.split.mask_length,
				      kvm_pmu_cap.events_mask_len);
	pmu->available_event_types = ~entry->ebx &
				     ((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
						    kvm_pmu_cap.num_counters_fixed);
		edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
						  kvm_pmu_cap.bit_width_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
		pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
	counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
	pmu->global_ctrl_mask = counter_mask;

	/*
	 * GLOBAL_STATUS and GLOBAL_OVF_CONTROL (a.k.a. GLOBAL_STATUS_RESET)
	 * share reserved bit definitions.  The kernel just happens to use
	 * OVF_CTRL for the names.
	 */
	pmu->global_status_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (vmx_pt_mode_is_host_guest())
		pmu->global_status_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
		pmu->reserved_bits ^= HSW_IN_TX;
		pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
	}

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

	perf_capabilities = vcpu_get_perf_capabilities(vcpu);
	if (cpuid_model_is_consistent(vcpu) &&
	    (perf_capabilities & PMU_CAP_LBR_FMT))
		memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps));
	else
		lbr_desc->records.nr = 0;

	if (lbr_desc->records.nr)
		bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);

	if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
		if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
			pmu->pebs_enable_mask = counter_mask;
			pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
			for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
				pmu->fixed_ctr_ctrl_mask &=
					~(1ULL << (KVM_FIXED_PMC_BASE_IDX + i * 4));
			}
			pmu->pebs_data_cfg_mask = ~0xff00000full;
		} else {
			pmu->pebs_enable_mask =
				~((1ull << pmu->nr_arch_gp_counters) - 1);
		}
	}
}

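/*
 * One-time init of constant per-counter state; the guest-visible PMU model
 * itself is configured later, by intel_pmu_refresh().
 */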
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + KVM_FIXED_PMC_BASE_IDX;
		pmu->fixed_counters[i].current_config = 0;
		pmu->fixed_counters[i].eventsel = intel_get_fixed_pmc_eventsel(i);
	}

	lbr_desc->records.nr = 0;
	lbr_desc->event = NULL;
	lbr_desc->msr_passthrough = false;
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	intel_pmu_release_guest_lbr_event(vcpu);
}

/*
 * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4.
 *
 * If Freeze_LBR_On_PMI = 1, the LBRs are frozen on PMI; KVM emulates this
 * behavior by clearing the LBR bit (bit 0) in IA32_DEBUGCTL.
 *
 * The guest needs to re-enable LBRs to resume branch recording.
 */
static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
{
	u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL);

	if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
		data &= ~DEBUGCTLMSR_LBR;
		vmcs_write64(GUEST_IA32_DEBUGCTL, data);
	}
}

static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	u8 version = vcpu_to_pmu(vcpu)->version;

	if (!intel_pmu_lbr_is_enabled(vcpu))
		return;

	if (version > 1 && version < 4)
		intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu);
}

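/*
 * Toggle interception for all LBR record MSRs plus LBR_SELECT and LBR_TOS;
 * set == true intercepts accesses, set == false passes them through.
 */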
static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
{
	struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
	int i;

	for (i = 0; i < lbr->nr; i++) {
		vmx_set_intercept_for_msr(vcpu, lbr->from + i, MSR_TYPE_RW, set);
		vmx_set_intercept_for_msr(vcpu, lbr->to + i, MSR_TYPE_RW, set);
		if (lbr->info)
			vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
	}

	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
}

static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (!lbr_desc->msr_passthrough)
		return;

	vmx_update_intercept_for_lbr_msrs(vcpu, true);
	lbr_desc->msr_passthrough = false;
}

static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (lbr_desc->msr_passthrough)
		return;

	vmx_update_intercept_for_lbr_msrs(vcpu, false);
	lbr_desc->msr_passthrough = true;
}

/*
 * Higher priority host perf events (e.g. CPU pinned) could reclaim the
 * PMU resources (e.g. LBRs) that were assigned to the guest.  This is
 * usually done via IPI calls (see perf_install_in_context() for details).
 *
 * Before entering non-root mode (with IRQs disabled here), double-check
 * that the PMU features enabled for the guest have not been reclaimed by
 * higher priority host events.  Otherwise, disallow the vCPU's access to
 * the reclaimed features.
 */
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (!lbr_desc->event) {
		vmx_disable_lbr_msrs_passthrough(vcpu);
		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
			goto warn;
		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
			goto warn;
		return;
	}

	if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) {
		vmx_disable_lbr_msrs_passthrough(vcpu);
		__clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
		goto warn;
	} else
		vmx_enable_lbr_msrs_passthrough(vcpu);

	return;

warn:
	pr_warn_ratelimited("vcpu-%d: fail to passthrough LBR.\n", vcpu->vcpu_id);
}

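/*
 * Lazily release the guest LBR event once the guest has stopped using LBRs,
 * i.e. when DEBUGCTL.LBR is clear at the time of the vPMU cleanup check.
 */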
static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
		intel_pmu_release_guest_lbr_event(vcpu);
}

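/*
 * Record which guest PMCs are backed by a host counter with a different
 * index ("cross-mapped"), so those host counters can be kept enabled in
 * PERF_GLOBAL_CTRL while the guest runs.
 */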
void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
{
	struct kvm_pmc *pmc = NULL;
	int bit, hw_idx;

	kvm_for_each_pmc(pmu, pmc, bit, (unsigned long *)&pmu->global_ctrl) {
		if (!pmc_speculative_in_use(pmc) ||
		    !pmc_is_globally_enabled(pmc) || !pmc->perf_event)
			continue;

		/*
		 * A negative index indicates the event isn't mapped to a
		 * physical counter in the host, e.g. due to contention.
		 */
		hw_idx = pmc->perf_event->hw.idx;
		if (hw_idx != pmc->idx && hw_idx > -1)
			pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx);
	}
}

struct kvm_pmu_ops intel_pmu_ops __initdata = {
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
	.deliver_pmi = intel_pmu_deliver_pmi,
	.cleanup = intel_pmu_cleanup,
	.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
	.MAX_NR_GP_COUNTERS = KVM_INTEL_PMC_MAX_GENERIC,
	.MIN_NR_GP_COUNTERS = 1,
};