// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>

#ifdef CONFIG_FPU
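/*
 * Reset the guest FP state: mark sstatus.FS "Initial" when the VCPU ISA
 * includes the F or D extension, otherwise leave the FP unit "Off".
 */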
void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	cntx->sstatus &= ~SR_FS;
	if (riscv_isa_extension_available(vcpu->arch.isa, f) ||
	    riscv_isa_extension_available(vcpu->arch.isa, d))
		cntx->sstatus |= SR_FS_INITIAL;
	else
		cntx->sstatus |= SR_FS_OFF;
}

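/* Mark the guest FP state as "Clean", i.e. in sync with the saved context. */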
static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)
{
	cntx->sstatus &= ~SR_FS;
	cntx->sstatus |= SR_FS_CLEAN;
}

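/*
 * Save the guest FP registers into @cntx, but only if the guest has dirtied
 * them (sstatus.FS == Dirty); the state is marked "Clean" afterwards.
 */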
void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
				  const unsigned long *isa)
{
	if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
		if (riscv_isa_extension_available(isa, d))
			__kvm_riscv_fp_d_save(cntx);
		else if (riscv_isa_extension_available(isa, f))
			__kvm_riscv_fp_f_save(cntx);
		kvm_riscv_vcpu_fp_clean(cntx);
	}
}

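/*
 * Restore the guest FP registers from @cntx unless the guest FP unit is
 * "Off"; the state is marked "Clean" afterwards.
 */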
void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
				     const unsigned long *isa)
{
	if ((cntx->sstatus & SR_FS) != SR_FS_OFF) {
		if (riscv_isa_extension_available(isa, d))
			__kvm_riscv_fp_d_restore(cntx);
		else if (riscv_isa_extension_available(isa, f))
			__kvm_riscv_fp_f_restore(cntx);
		kvm_riscv_vcpu_fp_clean(cntx);
	}
}

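/* Save the host FP registers into @cntx based on the host ISA (D or F). */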
void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
{
	/* No need to check host sstatus as it can be modified outside */
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_save(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_save(cntx);
}

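/* Restore the host FP registers from @cntx based on the host ISA (D or F). */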
void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)
{
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_restore(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_restore(cntx);
}
#endif

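/*
 * Handle KVM_GET_ONE_REG for the F/D register groups: copy the requested
 * f-register or fcsr value from the guest context out to user space.
 */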
int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val;

	if ((rtype == KVM_REG_RISCV_FP_F) &&
	    riscv_isa_extension_available(vcpu->arch.isa, f)) {
		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
			return -EINVAL;
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
			reg_val = &cntx->fp.f.f[reg_num];
		else
			return -ENOENT;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(vcpu->arch.isa, d)) {
		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
				return -EINVAL;
			reg_val = &cntx->fp.d.fcsr;
		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -ENOENT;
	} else
		return -ENOENT;

	if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

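/*
 * Handle KVM_SET_ONE_REG for the F/D register groups: copy the requested
 * f-register or fcsr value from user space into the guest context.
 */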
int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val;

	if ((rtype == KVM_REG_RISCV_FP_F) &&
	    riscv_isa_extension_available(vcpu->arch.isa, f)) {
		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
			return -EINVAL;
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
			reg_val = &cntx->fp.f.f[reg_num];
		else
			return -ENOENT;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(vcpu->arch.isa, d)) {
		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
				return -EINVAL;
			reg_val = &cntx->fp.d.fcsr;
		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -ENOENT;
	} else
		return -ENOENT;

	if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}