// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022 SiFive
 *
 * Authors:
 *     Vincent Chen <vincent.chen@sifive.com>
 *     Greentime Hu <greentime.hu@sifive.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>

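/*
 * Guest vector state is context-switched lazily, driven by the VS field
 * of the guest's sstatus: OFF means the guest may not use the vector
 * unit, INITIAL is the post-reset state, CLEAN means the in-memory copy
 * matches the hardware registers, and DIRTY means the guest has touched
 * the vector unit since the last save.
 */
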
#ifdef CONFIG_RISCV_ISA_V
void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
{
	unsigned long *isa = vcpu->arch.isa;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	cntx->sstatus &= ~SR_VS;
	if (riscv_isa_extension_available(isa, v)) {
		cntx->sstatus |= SR_VS_INITIAL;
		WARN_ON(!cntx->vector.datap);
		memset(cntx->vector.datap, 0, riscv_v_vsize);
	} else {
		cntx->sstatus |= SR_VS_OFF;
	}
}

static void kvm_riscv_vcpu_vector_clean(struct kvm_cpu_context *cntx)
{
	cntx->sstatus &= ~SR_VS;
	cntx->sstatus |= SR_VS_CLEAN;
}

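/*
 * Save the guest's vector state on VM-exit, but only if the guest has
 * actually modified it (VS == DIRTY); the context is then marked CLEAN
 * so repeated exits without guest vector activity skip the copy.
 */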
void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
				      unsigned long *isa)
{
	if ((cntx->sstatus & SR_VS) == SR_VS_DIRTY) {
		if (riscv_isa_extension_available(isa, v))
			__kvm_riscv_vector_save(cntx);
		kvm_riscv_vcpu_vector_clean(cntx);
	}
}

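/*
 * Restore the guest's vector state on VM-entry whenever the guest's
 * vector unit is enabled (VS != OFF), then mark the context CLEAN.
 */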
void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
					 unsigned long *isa)
{
	if ((cntx->sstatus & SR_VS) != SR_VS_OFF) {
		if (riscv_isa_extension_available(isa, v))
			__kvm_riscv_vector_restore(cntx);
		kvm_riscv_vcpu_vector_clean(cntx);
	}
}

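/*
 * Host vector state, by contrast, is saved and restored unconditionally:
 * the host's sstatus.VS can be changed outside KVM's control, so it
 * cannot be used to decide whether a copy is needed.
 */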
void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
{
	/* No need to check host sstatus as it can be modified outside */
	if (riscv_isa_extension_available(NULL, v))
		__kvm_riscv_vector_save(cntx);
}

void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
{
	if (riscv_isa_extension_available(NULL, v))
		__kvm_riscv_vector_restore(cntx);
}

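/*
 * riscv_v_vsize is the size of the whole vector register file
 * (32 registers of vlenb bytes each), so one buffer of that size backs
 * the guest context and another backs the host context.
 */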
int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
					struct kvm_cpu_context *cntx)
{
	cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
	if (!cntx->vector.datap)
		return -ENOMEM;
	cntx->vector.vlenb = riscv_v_vsize / 32;

	vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
	if (!vcpu->arch.host_context.vector.datap)
		return -ENOMEM;

	return 0;
}

void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
{
	/*
	 * The buffer is allocated into the reset context; the guest
	 * context inherits the same datap pointer when the vcpu is
	 * reset, so it is freed exactly once here.
	 */
	kfree(vcpu->arch.guest_reset_context.vector.datap);
	kfree(vcpu->arch.host_context.vector.datap);
}
#endif

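/*
 * Translate a KVM_REG_RISCV_VECTOR register number into the address of
 * its backing storage in the guest context: the vector CSRs are
 * unsigned-long sized fields, while each of the registers V0-V31
 * occupies vlenb bytes inside the datap buffer.
 */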
static int kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu,
				    unsigned long reg_num,
				    size_t reg_size,
				    void **reg_addr)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	size_t vlenb = riscv_v_vsize / 32;

	if (reg_num < KVM_REG_RISCV_VECTOR_REG(0)) {
		if (reg_size != sizeof(unsigned long))
			return -EINVAL;
		switch (reg_num) {
		case KVM_REG_RISCV_VECTOR_CSR_REG(vstart):
			*reg_addr = &cntx->vector.vstart;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(vl):
			*reg_addr = &cntx->vector.vl;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(vtype):
			*reg_addr = &cntx->vector.vtype;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr):
			*reg_addr = &cntx->vector.vcsr;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(vlenb):
			*reg_addr = &cntx->vector.vlenb;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(datap):
		default:
			return -ENOENT;
		}
	} else if (reg_num <= KVM_REG_RISCV_VECTOR_REG(31)) {
		if (reg_size != vlenb)
			return -EINVAL;
		*reg_addr = cntx->vector.datap +
			    (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb;
	} else {
		return -ENOENT;
	}

	return 0;
}

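/* Handle KVM_GET_ONE_REG for the vector register class. */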
int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	unsigned long *isa = vcpu->arch.isa;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_VECTOR);
	size_t reg_size = KVM_REG_SIZE(reg->id);
	void *reg_addr;
	int rc;

	if (!riscv_isa_extension_available(isa, v))
		return -ENOENT;

	rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
	if (rc)
		return rc;

	if (copy_to_user(uaddr, reg_addr, reg_size))
		return -EFAULT;

	return 0;
}

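/*
 * Handle KVM_SET_ONE_REG for the vector register class. vlenb is fixed
 * by the host's vector implementation, so a write to it is accepted
 * only if the value matches what is already there.
 */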
int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	unsigned long *isa = vcpu->arch.isa;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_VECTOR);
	size_t reg_size = KVM_REG_SIZE(reg->id);
	void *reg_addr;
	int rc;

	if (!riscv_isa_extension_available(isa, v))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_VECTOR_CSR_REG(vlenb)) {
		struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
		unsigned long reg_val;

		if (copy_from_user(&reg_val, uaddr, reg_size))
			return -EFAULT;
		if (reg_val != cntx->vector.vlenb)
			return -EINVAL;

		return 0;
	}

	rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
	if (rc)
		return rc;

	if (copy_from_user(reg_addr, uaddr, reg_size))
		return -EFAULT;

	return 0;
}
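
/*
 * A minimal userspace sketch of this interface (an illustration, not
 * part of this file): reading the guest's vlenb via KVM_GET_ONE_REG on
 * a 64-bit host, where vcpu_fd is a hypothetical fd obtained from
 * KVM_CREATE_VCPU and the macros come from the kvm uapi headers:
 *
 *	uint64_t vlenb;
 *	struct kvm_one_reg one_reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_VECTOR |
 *		      KVM_REG_RISCV_VECTOR_CSR_REG(vlenb),
 *		.addr = (uint64_t)&vlenb,
 *	};
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg) < 0)
 *		err(1, "KVM_GET_ONE_REG");
 */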

Source: linux/arch/riscv/kvm/vcpu_vector.c