// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/interrupt.h>

#include <asm/xen/hypercall.h>
#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xenpmu.h>

#include "xen-ops.h"
#include "pmu.h"

/* x86_pmu.handle_irq definition */
#include "../events/perf_event.h"

#define XENPMU_IRQ_PROCESSING    1
struct xenpmu {
	/* Shared page between hypervisor and domain */
	struct xen_pmu_data *xenpmu_data;

	uint8_t flags;
};
static DEFINE_PER_CPU(struct xenpmu, xenpmu_shared);
#define get_xenpmu_data()    (this_cpu_ptr(&xenpmu_shared)->xenpmu_data)
#define get_xenpmu_flags()   (this_cpu_ptr(&xenpmu_shared)->flags)

/* Macro for computing address of a PMU MSR bank */
#define field_offset(ctxt, field) ((void *)((uintptr_t)ctxt + \
					    (uintptr_t)ctxt->field))

/* AMD PMU */
#define F15H_NUM_COUNTERS   6
#define F10H_NUM_COUNTERS   4

static __read_mostly uint32_t amd_counters_base;
static __read_mostly uint32_t amd_ctrls_base;
static __read_mostly int amd_msr_step;
static __read_mostly int k7_counters_mirrored;
static __read_mostly int amd_num_counters;

/* Intel PMU */
#define MSR_TYPE_COUNTER            0
#define MSR_TYPE_CTRL               1
#define MSR_TYPE_GLOBAL             2
#define MSR_TYPE_ARCH_COUNTER       3
#define MSR_TYPE_ARCH_CTRL          4

/* Number of general pmu registers (CPUID.EAX[0xa].EAX[8..15]) */
#define PMU_GENERAL_NR_SHIFT        8
#define PMU_GENERAL_NR_BITS         8
#define PMU_GENERAL_NR_MASK         (((1 << PMU_GENERAL_NR_BITS) - 1) \
				     << PMU_GENERAL_NR_SHIFT)

/* Number of fixed pmu registers (CPUID.EDX[0xa].EDX[0..4]) */
#define PMU_FIXED_NR_SHIFT          0
#define PMU_FIXED_NR_BITS           5
#define PMU_FIXED_NR_MASK           (((1 << PMU_FIXED_NR_BITS) - 1) \
				     << PMU_FIXED_NR_SHIFT)

/* Alias registers (0x4c1) for full-width writes to PMCs */
#define MSR_PMC_ALIAS_MASK          (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_PMC0))

#define INTEL_PMC_TYPE_SHIFT        30

static __read_mostly int intel_num_arch_counters, intel_num_fixed_counters;

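/*
 * Probe the PMU layout at boot: counter/control MSR bases and stride for
 * AMD/Hygon, or the number of architectural and fixed counters from
 * CPUID leaf 0xa for Intel-compatible CPUs.
 */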
static void xen_pmu_arch_init(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		switch (boot_cpu_data.x86) {
		case 0x15:
			amd_num_counters = F15H_NUM_COUNTERS;
			amd_counters_base = MSR_F15H_PERF_CTR;
			amd_ctrls_base = MSR_F15H_PERF_CTL;
			amd_msr_step = 2;
			k7_counters_mirrored = 1;
			break;
		case 0x10:
		case 0x12:
		case 0x14:
		case 0x16:
		default:
			amd_num_counters = F10H_NUM_COUNTERS;
			amd_counters_base = MSR_K7_PERFCTR0;
			amd_ctrls_base = MSR_K7_EVNTSEL0;
			amd_msr_step = 1;
			k7_counters_mirrored = 0;
			break;
		}
	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		amd_num_counters = F10H_NUM_COUNTERS;
		amd_counters_base = MSR_K7_PERFCTR0;
		amd_ctrls_base = MSR_K7_EVNTSEL0;
		amd_msr_step = 1;
		k7_counters_mirrored = 0;
	} else {
		uint32_t eax, ebx, ecx, edx;

		cpuid(0xa, &eax, &ebx, &ecx, &edx);

		intel_num_arch_counters = (eax & PMU_GENERAL_NR_MASK) >>
			PMU_GENERAL_NR_SHIFT;
		intel_num_fixed_counters = (edx & PMU_FIXED_NR_MASK) >>
			PMU_FIXED_NR_SHIFT;
	}
}

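/*
 * On Family 15h the legacy K7 counter/event-select MSRs are mirrored;
 * translate a K7 address to its F15h equivalent, passing anything else
 * through unchanged.
 */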
static inline uint32_t get_fam15h_addr(u32 addr)
{
	switch (addr) {
	case MSR_K7_PERFCTR0:
	case MSR_K7_PERFCTR1:
	case MSR_K7_PERFCTR2:
	case MSR_K7_PERFCTR3:
		return MSR_F15H_PERF_CTR + (addr - MSR_K7_PERFCTR0);
	case MSR_K7_EVNTSEL0:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_EVNTSEL3:
		return MSR_F15H_PERF_CTL + (addr - MSR_K7_EVNTSEL0);
	default:
		break;
	}

	return addr;
}

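/* Return true if @msr falls in one of the AMD/Hygon PMU MSR ranges. */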
static inline bool is_amd_pmu_msr(unsigned int msr)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if ((msr >= MSR_F15H_PERF_CTL &&
	     msr < MSR_F15H_PERF_CTR + (amd_num_counters * 2)) ||
	    (msr >= MSR_K7_EVNTSEL0 &&
	     msr < MSR_K7_PERFCTR0 + amd_num_counters))
		return true;

	return false;
}

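/*
 * Classify an Intel PMU MSR: on success *type identifies the MSR class
 * and, for counter and event-select MSRs, *index is the counter's
 * position within that class.
 */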
static bool is_intel_pmu_msr(u32 msr_index, int *type, int *index)
{
	u32 msr_index_pmc;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
		return false;

	switch (msr_index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_IA32_DS_AREA:
	case MSR_IA32_PEBS_ENABLE:
		*type = MSR_TYPE_CTRL;
		return true;

	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*type = MSR_TYPE_GLOBAL;
		return true;

	default:
		if ((msr_index >= MSR_CORE_PERF_FIXED_CTR0) &&
		    (msr_index < MSR_CORE_PERF_FIXED_CTR0 +
				 intel_num_fixed_counters)) {
			*index = msr_index - MSR_CORE_PERF_FIXED_CTR0;
			*type = MSR_TYPE_COUNTER;
			return true;
		}

		if ((msr_index >= MSR_P6_EVNTSEL0) &&
		    (msr_index < MSR_P6_EVNTSEL0 + intel_num_arch_counters)) {
			*index = msr_index - MSR_P6_EVNTSEL0;
			*type = MSR_TYPE_ARCH_CTRL;
			return true;
		}

		msr_index_pmc = msr_index & MSR_PMC_ALIAS_MASK;
		if ((msr_index_pmc >= MSR_IA32_PERFCTR0) &&
		    (msr_index_pmc < MSR_IA32_PERFCTR0 +
				     intel_num_arch_counters)) {
			*type = MSR_TYPE_ARCH_COUNTER;
			*index = msr_index_pmc - MSR_IA32_PERFCTR0;
			return true;
		}
		return false;
	}
}

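/*
 * Emulate an Intel PMU MSR access against the context shared with the
 * hypervisor. Succeeds only while a PMU interrupt is being processed;
 * returns false to make the caller fall back to a hardware MSR access.
 */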
static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type,
				  int index, bool is_read)
{
	uint64_t *reg = NULL;
	struct xen_pmu_intel_ctxt *ctxt;
	uint64_t *fix_counters;
	struct xen_pmu_cntr_pair *arch_cntr_pair;
	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
	uint8_t xenpmu_flags = get_xenpmu_flags();

	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING))
		return false;

	ctxt = &xenpmu_data->pmu.c.intel;

	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		reg = &ctxt->global_ovf_ctrl;
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		reg = &ctxt->global_status;
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		reg = &ctxt->global_ctrl;
		break;
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		reg = &ctxt->fixed_ctrl;
		break;
	default:
		switch (type) {
		case MSR_TYPE_COUNTER:
			fix_counters = field_offset(ctxt, fixed_counters);
			reg = &fix_counters[index];
			break;
		case MSR_TYPE_ARCH_COUNTER:
			arch_cntr_pair = field_offset(ctxt, arch_counters);
			reg = &arch_cntr_pair[index].counter;
			break;
		case MSR_TYPE_ARCH_CTRL:
			arch_cntr_pair = field_offset(ctxt, arch_counters);
			reg = &arch_cntr_pair[index].control;
			break;
		default:
			return false;
		}
	}

	if (reg) {
		if (is_read)
			*val = *reg;
		else {
			*reg = *val;

			if (msr == MSR_CORE_PERF_GLOBAL_OVF_CTRL)
				ctxt->global_status &= (~(*val));
		}
		return true;
	}

	return false;
}

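/* AMD counterpart of xen_intel_pmu_emulate(). */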
static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
{
	uint64_t *reg = NULL;
	int i, off = 0;
	struct xen_pmu_amd_ctxt *ctxt;
	uint64_t *counter_regs, *ctrl_regs;
	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
	uint8_t xenpmu_flags = get_xenpmu_flags();

	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING))
		return false;

	if (k7_counters_mirrored &&
	    ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)))
		msr = get_fam15h_addr(msr);

	ctxt = &xenpmu_data->pmu.c.amd;
	for (i = 0; i < amd_num_counters; i++) {
		if (msr == amd_ctrls_base + off) {
			ctrl_regs = field_offset(ctxt, ctrls);
			reg = &ctrl_regs[i];
			break;
		} else if (msr == amd_counters_base + off) {
			counter_regs = field_offset(ctxt, counters);
			reg = &counter_regs[i];
			break;
		}
		off += amd_msr_step;
	}

	if (reg) {
		if (is_read)
			*val = *reg;
		else
			*reg = *val;

		return true;
	}
	return false;
}

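/*
 * Returns true if @msr is a PMU MSR handled here; *emul then reports
 * whether the access was satisfied from the shared context (true) or
 * must still go to the hardware (false).
 */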
static bool pmu_msr_chk_emulated(unsigned int msr, uint64_t *val, bool is_read,
				 bool *emul)
{
	int type, index = 0;

	if (is_amd_pmu_msr(msr))
		*emul = xen_amd_pmu_emulate(msr, val, is_read);
	else if (is_intel_pmu_msr(msr, &type, &index))
		*emul = xen_intel_pmu_emulate(msr, val, type, index, is_read);
	else
		return false;

	return true;
}

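/*
 * Read a PMU MSR, from the emulated context when possible and from the
 * hardware otherwise. Returns false if @msr is not a PMU MSR.
 */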
bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
{
	bool emulated;

	if (!pmu_msr_chk_emulated(msr, val, true, &emulated))
		return false;

	if (!emulated) {
		*val = err ? native_read_msr_safe(msr, err)
			   : native_read_msr(msr);
	}

	return true;
}

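/* Write counterpart of pmu_msr_read(). */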
bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
{
	uint64_t val = ((uint64_t)high << 32) | low;
	bool emulated;

	if (!pmu_msr_chk_emulated(msr, &val, false, &emulated))
		return false;

	if (!emulated) {
		if (err)
			*err = native_write_msr_safe(msr, low, high);
		else
			native_write_msr(msr, low, high);
	}

	return true;
}

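/*
 * RDPMC emulation for AMD: read the counter from hardware outside of PMU
 * interrupt processing, otherwise from the cached Xen context.
 */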
static unsigned long long xen_amd_read_pmc(int counter)
{
	struct xen_pmu_amd_ctxt *ctxt;
	uint64_t *counter_regs;
	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
	uint8_t xenpmu_flags = get_xenpmu_flags();

	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
		uint32_t msr;
		int err;

		msr = amd_counters_base + (counter * amd_msr_step);
		return native_read_msr_safe(msr, &err);
	}

	ctxt = &xenpmu_data->pmu.c.amd;
	counter_regs = field_offset(ctxt, counters);
	return counter_regs[counter];
}

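/*
 * RDPMC emulation for Intel; bit INTEL_PMC_TYPE_SHIFT of @counter selects
 * fixed vs. architectural counters, the low bits index the counter.
 */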
static unsigned long long xen_intel_read_pmc(int counter)
{
	struct xen_pmu_intel_ctxt *ctxt;
	uint64_t *fixed_counters;
	struct xen_pmu_cntr_pair *arch_cntr_pair;
	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
	uint8_t xenpmu_flags = get_xenpmu_flags();

	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
		uint32_t msr;
		int err;

		if (counter & (1 << INTEL_PMC_TYPE_SHIFT))
			msr = MSR_CORE_PERF_FIXED_CTR0 + (counter & 0xffff);
		else
			msr = MSR_IA32_PERFCTR0 + counter;

		return native_read_msr_safe(msr, &err);
	}

	ctxt = &xenpmu_data->pmu.c.intel;
	if (counter & (1 << INTEL_PMC_TYPE_SHIFT)) {
		fixed_counters = field_offset(ctxt, fixed_counters);
		return fixed_counters[counter & 0xffff];
	}

	arch_cntr_pair = field_offset(ctxt, arch_counters);
	return arch_cntr_pair[counter].counter;
}

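/* Vendor dispatch for RDPMC; non-Intel CPUs use the AMD/K7 layout. */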
unsigned long long xen_read_pmc(int counter)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return xen_amd_read_pmc(counter);
	else
		return xen_intel_read_pmc(counter);
}

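/*
 * Mirror an LVTPC APIC update into the shared PMU context; outside of
 * PMU interrupt processing, also push it to the hypervisor immediately.
 */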
int pmu_apic_update(uint32_t val)
{
	int ret;
	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();

	if (!xenpmu_data) {
		pr_warn_once("%s: pmudata not initialized\n", __func__);
		return -EINVAL;
	}

	xenpmu_data->pmu.l.lapic_lvtpc = val;

	if (get_xenpmu_flags() & XENPMU_IRQ_PROCESSING)
		return 0;

	ret = HYPERVISOR_xenpmu_op(XENPMU_lvtpc_set, NULL);

	return ret;
}

/* perf callbacks */
static unsigned int xen_guest_state(void)
{
	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
	unsigned int state = 0;

	if (!xenpmu_data) {
		pr_warn_once("%s: pmudata not initialized\n", __func__);
		return state;
	}

	if (!xen_initial_domain() || (xenpmu_data->domain_id >= DOMID_SELF))
		return state;

	state |= PERF_GUEST_ACTIVE;

	if (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_PV) {
		if (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_USER)
			state |= PERF_GUEST_USER;
	} else if (xenpmu_data->pmu.r.regs.cpl & 3) {
		state |= PERF_GUEST_USER;
	}

	return state;
}

static unsigned long xen_get_guest_ip(void)
{
	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();

	if (!xenpmu_data) {
		pr_warn_once("%s: pmudata not initialized\n", __func__);
		return 0;
	}

	return xenpmu_data->pmu.r.regs.ip;
}

static struct perf_guest_info_callbacks xen_guest_cbs = {
	.state  = xen_guest_state,
	.get_ip = xen_get_guest_ip,
};

/* Convert registers from Xen's format to Linux's */
static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
			     struct pt_regs *regs, uint64_t pmu_flags)
{
	regs->ip = xen_regs->ip;
	regs->cs = xen_regs->cs;
	regs->sp = xen_regs->sp;

	if (pmu_flags & PMU_SAMPLE_PV) {
		if (pmu_flags & PMU_SAMPLE_USER)
			regs->cs |= 3;
		else
			regs->cs &= ~3;
	} else {
		if (xen_regs->cpl)
			regs->cs |= 3;
		else
			regs->cs &= ~3;
	}
}

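/*
 * PMU VIRQ handler: mark the vCPU as processing a PMU interrupt so that
 * MSR/PMC accesses are served from the shared context, run the native
 * perf handler on the converted registers, then flush the cached context
 * back to the hardware.
 */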
irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
{
	int err, ret = IRQ_NONE;
	struct pt_regs regs = {0};
	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
	uint8_t xenpmu_flags = get_xenpmu_flags();

	if (!xenpmu_data) {
		pr_warn_once("%s: pmudata not initialized\n", __func__);
		return ret;
	}

	this_cpu_ptr(&xenpmu_shared)->flags =
		xenpmu_flags | XENPMU_IRQ_PROCESSING;
	xen_convert_regs(&xenpmu_data->pmu.r.regs, &regs,
			 xenpmu_data->pmu.pmu_flags);
	if (x86_pmu.handle_irq(&regs))
		ret = IRQ_HANDLED;

	/* Write out cached context to HW */
	err = HYPERVISOR_xenpmu_op(XENPMU_flush, NULL);
	this_cpu_ptr(&xenpmu_shared)->flags = xenpmu_flags;
	if (err) {
		pr_warn_once("%s: failed hypercall, err: %d\n", __func__, err);
		return IRQ_NONE;
	}

	return ret;
}

bool is_xen_pmu;

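/*
 * Register this CPU's shared PMU page with the hypervisor. On the first
 * successful registration, also set is_xen_pmu, register the perf guest
 * callbacks and probe the PMU layout.
 */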
void xen_pmu_init(int cpu)
{
	int err;
	struct xen_pmu_params xp;
	unsigned long pfn;
	struct xen_pmu_data *xenpmu_data;

	BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE);

	if (xen_hvm_domain() || (cpu != 0 && !is_xen_pmu))
		return;

	xenpmu_data = (struct xen_pmu_data *)get_zeroed_page(GFP_KERNEL);
	if (!xenpmu_data) {
		pr_err("VPMU init: No memory\n");
		return;
	}
	pfn = virt_to_pfn(xenpmu_data);

	xp.val = pfn_to_mfn(pfn);
	xp.vcpu = cpu;
	xp.version.maj = XENPMU_VER_MAJ;
	xp.version.min = XENPMU_VER_MIN;
	err = HYPERVISOR_xenpmu_op(XENPMU_init, &xp);
	if (err)
		goto fail;

	per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data;
	per_cpu(xenpmu_shared, cpu).flags = 0;

	if (!is_xen_pmu) {
		is_xen_pmu = true;
		perf_register_guest_info_callbacks(&xen_guest_cbs);
		xen_pmu_arch_init();
	}

	return;

fail:
	if (err == -EOPNOTSUPP || err == -ENOSYS)
		pr_info_once("VPMU disabled by hypervisor.\n");
	else
		pr_info_once("Could not initialize VPMU for cpu %d, error %d\n",
			     cpu, err);
	free_pages((unsigned long)xenpmu_data, 0);
}

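/* Unregister this CPU's VPMU with the hypervisor and free its shared page. */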
void xen_pmu_finish(int cpu)
{
	struct xen_pmu_params xp;

	if (xen_hvm_domain())
		return;

	xp.vcpu = cpu;
	xp.version.maj = XENPMU_VER_MAJ;
	xp.version.min = XENPMU_VER_MIN;

	(void)HYPERVISOR_xenpmu_op(XENPMU_finish, &xp);

	free_pages((unsigned long)per_cpu(xenpmu_shared, cpu).xenpmu_data, 0);
	per_cpu(xenpmu_shared, cpu).xenpmu_data = NULL;
}