1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (c) 2021 Western Digital Corporation or its affiliates. |
4 | * |
5 | * Authors: |
6 | * Atish Patra <atish.patra@wdc.com> |
7 | */ |
8 | |
9 | #include <linux/errno.h> |
10 | #include <linux/err.h> |
11 | #include <linux/kvm_host.h> |
12 | #include <linux/version.h> |
13 | #include <asm/sbi.h> |
14 | #include <asm/kvm_vcpu_sbi.h> |
15 | |
16 | static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, |
17 | struct kvm_vcpu_sbi_return *retdata) |
18 | { |
19 | struct kvm_cpu_context *cp = &vcpu->arch.guest_context; |
20 | const struct kvm_vcpu_sbi_extension *sbi_ext; |
21 | unsigned long *out_val = &retdata->out_val; |
22 | |
23 | switch (cp->a6) { |
24 | case SBI_EXT_BASE_GET_SPEC_VERSION: |
25 | *out_val = (KVM_SBI_VERSION_MAJOR << |
26 | SBI_SPEC_VERSION_MAJOR_SHIFT) | |
27 | KVM_SBI_VERSION_MINOR; |
28 | break; |
29 | case SBI_EXT_BASE_GET_IMP_ID: |
30 | *out_val = KVM_SBI_IMPID; |
31 | break; |
32 | case SBI_EXT_BASE_GET_IMP_VERSION: |
33 | *out_val = LINUX_VERSION_CODE; |
34 | break; |
35 | case SBI_EXT_BASE_PROBE_EXT: |
36 | if ((cp->a0 >= SBI_EXT_EXPERIMENTAL_START && |
37 | cp->a0 <= SBI_EXT_EXPERIMENTAL_END) || |
38 | (cp->a0 >= SBI_EXT_VENDOR_START && |
39 | cp->a0 <= SBI_EXT_VENDOR_END)) { |
40 | /* |
41 | * For experimental/vendor extensions |
42 | * forward it to the userspace |
43 | */ |
44 | kvm_riscv_vcpu_sbi_forward(vcpu, run); |
45 | retdata->uexit = true; |
46 | } else { |
47 | sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a0); |
48 | *out_val = sbi_ext && sbi_ext->probe ? |
49 | sbi_ext->probe(vcpu) : !!sbi_ext; |
50 | } |
51 | break; |
52 | case SBI_EXT_BASE_GET_MVENDORID: |
53 | *out_val = vcpu->arch.mvendorid; |
54 | break; |
55 | case SBI_EXT_BASE_GET_MARCHID: |
56 | *out_val = vcpu->arch.marchid; |
57 | break; |
58 | case SBI_EXT_BASE_GET_MIMPID: |
59 | *out_val = vcpu->arch.mimpid; |
60 | break; |
61 | default: |
62 | retdata->err_val = SBI_ERR_NOT_SUPPORTED; |
63 | break; |
64 | } |
65 | |
66 | return 0; |
67 | } |
68 | |
/*
 * Registration of the SBI Base extension handler; extid_start == extid_end,
 * so this entry matches exactly one extension ID (SBI_EXT_BASE).
 */
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base = {
	.extid_start = SBI_EXT_BASE,
	.extid_end = SBI_EXT_BASE,
	.handler = kvm_sbi_ext_base_handler,
};
74 | |
75 | static int kvm_sbi_ext_forward_handler(struct kvm_vcpu *vcpu, |
76 | struct kvm_run *run, |
77 | struct kvm_vcpu_sbi_return *retdata) |
78 | { |
79 | /* |
80 | * Both SBI experimental and vendor extensions are |
81 | * unconditionally forwarded to userspace. |
82 | */ |
83 | kvm_riscv_vcpu_sbi_forward(vcpu, run); |
84 | retdata->uexit = true; |
85 | return 0; |
86 | } |
87 | |
/*
 * The whole SBI experimental extension ID range: every call in it is
 * forwarded to userspace by kvm_sbi_ext_forward_handler().
 */
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental = {
	.extid_start = SBI_EXT_EXPERIMENTAL_START,
	.extid_end = SBI_EXT_EXPERIMENTAL_END,
	.handler = kvm_sbi_ext_forward_handler,
};
93 | |
/*
 * The whole SBI vendor-specific extension ID range: every call in it is
 * forwarded to userspace by kvm_sbi_ext_forward_handler().
 */
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor = {
	.extid_start = SBI_EXT_VENDOR_START,
	.extid_end = SBI_EXT_VENDOR_END,
	.handler = kvm_sbi_ext_forward_handler,
};
99 | |