1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (c) 2023 Rivos Inc |
4 | * |
5 | * Authors: |
6 | * Atish Patra <atishp@rivosinc.com> |
7 | */ |
8 | |
9 | #include <linux/errno.h> |
10 | #include <linux/err.h> |
11 | #include <linux/kvm_host.h> |
12 | #include <asm/csr.h> |
13 | #include <asm/sbi.h> |
14 | #include <asm/kvm_vcpu_sbi.h> |
15 | |
16 | static int kvm_sbi_ext_pmu_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, |
17 | struct kvm_vcpu_sbi_return *retdata) |
18 | { |
19 | int ret = 0; |
20 | struct kvm_cpu_context *cp = &vcpu->arch.guest_context; |
21 | struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); |
22 | unsigned long funcid = cp->a6; |
23 | u64 temp; |
24 | |
25 | if (!kvpmu->init_done) { |
26 | retdata->err_val = SBI_ERR_NOT_SUPPORTED; |
27 | return 0; |
28 | } |
29 | |
30 | switch (funcid) { |
31 | case SBI_EXT_PMU_NUM_COUNTERS: |
32 | ret = kvm_riscv_vcpu_pmu_num_ctrs(vcpu, retdata); |
33 | break; |
34 | case SBI_EXT_PMU_COUNTER_GET_INFO: |
35 | ret = kvm_riscv_vcpu_pmu_ctr_info(vcpu, cp->a0, retdata); |
36 | break; |
37 | case SBI_EXT_PMU_COUNTER_CFG_MATCH: |
38 | #if defined(CONFIG_32BIT) |
39 | temp = ((uint64_t)cp->a5 << 32) | cp->a4; |
40 | #else |
41 | temp = cp->a4; |
42 | #endif |
43 | /* |
44 | * This can fail if perf core framework fails to create an event. |
45 | * Forward the error to userspace because it's an error which |
46 | * happened within the host kernel. The other option would be |
47 | * to convert to an SBI error and forward to the guest. |
48 | */ |
49 | ret = kvm_riscv_vcpu_pmu_ctr_cfg_match(vcpu, cp->a0, cp->a1, |
50 | cp->a2, cp->a3, temp, retdata); |
51 | break; |
52 | case SBI_EXT_PMU_COUNTER_START: |
53 | #if defined(CONFIG_32BIT) |
54 | temp = ((uint64_t)cp->a4 << 32) | cp->a3; |
55 | #else |
56 | temp = cp->a3; |
57 | #endif |
58 | ret = kvm_riscv_vcpu_pmu_ctr_start(vcpu, cp->a0, cp->a1, cp->a2, |
59 | temp, retdata); |
60 | break; |
61 | case SBI_EXT_PMU_COUNTER_STOP: |
62 | ret = kvm_riscv_vcpu_pmu_ctr_stop(vcpu, cp->a0, cp->a1, cp->a2, retdata); |
63 | break; |
64 | case SBI_EXT_PMU_COUNTER_FW_READ: |
65 | ret = kvm_riscv_vcpu_pmu_ctr_read(vcpu, cp->a0, retdata); |
66 | break; |
67 | default: |
68 | retdata->err_val = SBI_ERR_NOT_SUPPORTED; |
69 | } |
70 | |
71 | return ret; |
72 | } |
73 | |
74 | static unsigned long kvm_sbi_ext_pmu_probe(struct kvm_vcpu *vcpu) |
75 | { |
76 | struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); |
77 | |
78 | return kvpmu->init_done; |
79 | } |
80 | |
/* SBI PMU extension descriptor: single extension ID, dispatched above. */
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = SBI_EXT_PMU,
	.extid_end = SBI_EXT_PMU,
	.handler = kvm_sbi_ext_pmu_handler,
	.probe = kvm_sbi_ext_pmu_probe,
};
87 | |