// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitops.h>
#include <linux/kvm_host.h>

#include <asm/cpufeature.h>

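/*
 * Instructions are decoded with mask/match pairs: "insn" matches a
 * pattern iff (insn & INSN_MASK_*) == INSN_MATCH_*. The major opcode
 * of a 32-bit instruction lives in bits [6:2]; for SYSTEM (0x73) this
 * field is (0x73 & 0x7c) >> 2 == 28.
 */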
#define INSN_OPCODE_MASK	0x007c
#define INSN_OPCODE_SHIFT	2
#define INSN_OPCODE_SYSTEM	28

#define INSN_MASK_WFI		0xffffffff
#define INSN_MATCH_WFI		0x10500073

#define INSN_MATCH_CSRRW	0x1073
#define INSN_MASK_CSRRW		0x707f
#define INSN_MATCH_CSRRS	0x2073
#define INSN_MASK_CSRRS		0x707f
#define INSN_MATCH_CSRRC	0x3073
#define INSN_MASK_CSRRC		0x707f
#define INSN_MATCH_CSRRWI	0x5073
#define INSN_MASK_CSRRWI	0x707f
#define INSN_MATCH_CSRRSI	0x6073
#define INSN_MASK_CSRRSI	0x707f
#define INSN_MATCH_CSRRCI	0x7073
#define INSN_MASK_CSRRCI	0x707f

#define INSN_MATCH_LB		0x3
#define INSN_MASK_LB		0x707f
#define INSN_MATCH_LH		0x1003
#define INSN_MASK_LH		0x707f
#define INSN_MATCH_LW		0x2003
#define INSN_MASK_LW		0x707f
#define INSN_MATCH_LD		0x3003
#define INSN_MASK_LD		0x707f
#define INSN_MATCH_LBU		0x4003
#define INSN_MASK_LBU		0x707f
#define INSN_MATCH_LHU		0x5003
#define INSN_MASK_LHU		0x707f
#define INSN_MATCH_LWU		0x6003
#define INSN_MASK_LWU		0x707f
#define INSN_MATCH_SB		0x23
#define INSN_MASK_SB		0x707f
#define INSN_MATCH_SH		0x1023
#define INSN_MASK_SH		0x707f
#define INSN_MATCH_SW		0x2023
#define INSN_MASK_SW		0x707f
#define INSN_MATCH_SD		0x3023
#define INSN_MASK_SD		0x707f

#define INSN_MATCH_C_LD		0x6000
#define INSN_MASK_C_LD		0xe003
#define INSN_MATCH_C_SD		0xe000
#define INSN_MASK_C_SD		0xe003
#define INSN_MATCH_C_LW		0x4000
#define INSN_MASK_C_LW		0xe003
#define INSN_MATCH_C_SW		0xc000
#define INSN_MASK_C_SW		0xe003
#define INSN_MATCH_C_LDSP	0x6002
#define INSN_MASK_C_LDSP	0xe003
#define INSN_MATCH_C_SDSP	0xe002
#define INSN_MASK_C_SDSP	0xe003
#define INSN_MATCH_C_LWSP	0x4002
#define INSN_MASK_C_LWSP	0xe003
#define INSN_MATCH_C_SWSP	0xc002
#define INSN_MASK_C_SWSP	0xe003

#define INSN_16BIT_MASK		0x3

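/*
 * A RISC-V instruction is 32 bits long when its lowest two bits are
 * both set (11b); any other value in bits [1:0] marks a 16-bit
 * compressed instruction.
 */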
#define INSN_IS_16BIT(insn)	(((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)

#define INSN_LEN(insn)		(INSN_IS_16BIT(insn) ? 2 : 4)

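/* log2 of the size of one GPR: 8 bytes on 64-bit, 4 bytes on 32-bit */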
#ifdef CONFIG_64BIT
#define LOG_REGBYTES		3
#else
#define LOG_REGBYTES		2
#endif
#define REGBYTES		(1 << LOG_REGBYTES)

#define SH_RD			7
#define SH_RS1			15
#define SH_RS2			20
#define SH_RS2C			2
#define MASK_RX			0x1f

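/*
 * RV_X() extracts an n-bit field of x starting at bit s. The
 * RVC_*_IMM() helpers below reassemble the scattered immediate bits
 * of compressed load/store encodings into byte offsets.
 */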
#define RV_X(x, s, n)		(((x) >> (s)) & ((1 << (n)) - 1))
#define RVC_LW_IMM(x)		((RV_X(x, 6, 1) << 2) | \
				 (RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x)		((RV_X(x, 4, 3) << 2) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x)		((RV_X(x, 5, 2) << 3) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x)		((RV_X(x, 9, 4) << 2) | \
				 (RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn)		(8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn)		(8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn)		RV_X(insn, SH_RS2C, 5)

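/*
 * GPR access trick: struct kvm_cpu_context begins with the guest
 * GPRs laid out in order, so a register number scaled by REGBYTES is
 * a byte offset into it. REG_OFFSET() extracts the register field at
 * "pos" and scales it in one step; SHIFT_RIGHT() turns a negative
 * distance into a left shift so the same macro also works when
 * pos < LOG_REGBYTES.
 */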
#define SHIFT_RIGHT(x, y)		\
	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))

#define REG_MASK			\
	((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))

#define REG_OFFSET(insn, pos)		\
	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)

#define REG_PTR(insn, pos, regs)	\
	((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))

#define GET_FUNCT3(insn)	(((insn) >> 12) & 7)

#define GET_RS1(insn, regs)	(*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs)	(*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs)	(*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs)	(*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs)	(*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs)		(*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val)	(*REG_PTR(insn, SH_RD, regs) = (val))
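/*
 * Immediates are recovered with arithmetic right shifts on s32 so
 * the sign bit (instruction bit 31) is replicated: IMM_I is insn
 * bits [31:20], IMM_S splices bits [31:25] above bits [11:7].
 */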
#define IMM_I(insn)		((s32)(insn) >> 20)
#define IMM_S(insn)		(((s32)(insn) >> 25 << 5) | \
				 (s32)(((insn) >> 7) & 0x1f))

struct insn_func {
	unsigned long mask;
	unsigned long match;
	/*
	 * Possible return values are as follows:
	 * 1) Returns < 0 for error case
	 * 2) Returns 0 for exit to user-space
	 * 3) Returns 1 to continue with next sepc
	 * 4) Returns 2 to continue with same sepc
	 * 5) Returns 3 to inject illegal instruction trap and continue
	 * 6) Returns 4 to inject virtual instruction trap and continue
	 *
	 * Use enum kvm_insn_return for return values
	 */
	int (*func)(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
};

static int truly_illegal_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_INST_ILLEGAL;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

static int truly_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_VIRTUAL_INST_FAULT;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

/**
 * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
 *
 * @vcpu: The VCPU pointer
 */
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
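	/*
	 * Drop the SRCU read lock while the VCPU is halted: the wait
	 * can be unbounded, and holding the read side would stall
	 * writers synchronizing on kvm->srcu.
	 */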
	if (!kvm_arch_vcpu_runnable(vcpu)) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		kvm_vcpu_halt(vcpu);
		kvm_vcpu_srcu_read_lock(vcpu);
	}
}

static int wfi_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	vcpu->stat.wfi_exit_stat++;
	kvm_riscv_vcpu_wfi(vcpu);
	return KVM_INSN_CONTINUE_NEXT_SEPC;
}

struct csr_func {
	unsigned int base;
	unsigned int count;
	/*
	 * Possible return values are the same as those of the "func"
	 * callback in "struct insn_func".
	 */
	int (*func)(struct kvm_vcpu *vcpu, unsigned int csr_num,
		    unsigned long *val, unsigned long new_val,
		    unsigned long wr_mask);
};

static int seed_csr_rmw(struct kvm_vcpu *vcpu, unsigned int csr_num,
			unsigned long *val, unsigned long new_val,
			unsigned long wr_mask)
{
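	/*
	 * Guests without Zkr must see an illegal instruction trap;
	 * otherwise SEED accesses are forwarded to the user space VMM
	 * for emulation.
	 */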
	if (!riscv_isa_extension_available(vcpu->arch.isa, ZKR))
		return KVM_INSN_ILLEGAL_TRAP;

	return KVM_INSN_EXIT_TO_USER_SPACE;
}

static const struct csr_func csr_funcs[] = {
	KVM_RISCV_VCPU_AIA_CSR_FUNCS
	KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS
	{ .base = CSR_SEED, .count = 1, .func = seed_csr_rmw },
};

/**
 * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
 *				emulation or in-kernel emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the CSR data
 *
 * Returns > 0 upon failure and 0 upon success
 */
int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	ulong insn;

	if (vcpu->arch.csr_decode.return_handled)
		return 0;
	vcpu->arch.csr_decode.return_handled = 1;

	/* Update destination register for CSR reads */
	insn = vcpu->arch.csr_decode.insn;
	if ((insn >> SH_RD) & MASK_RX)
		SET_RD(insn, &vcpu->arch.guest_context,
		       run->riscv_csr.ret_value);

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += INSN_LEN(insn);

	return 0;
}

static int csr_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	unsigned int csr_num = insn >> SH_RS2;
	unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
	ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context);
	const struct csr_func *tcfn, *cfn = NULL;
	ulong val = 0, wr_mask = 0, new_val = 0;

	/* Decode the CSR instruction */
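	/*
	 * Every variant reduces to a read-modify-write: only bits set
	 * in wr_mask are written, taking their value from new_val. The
	 * immediate forms (CSRR*I) reuse the rs1 field as a
	 * zero-extended 5-bit immediate (rs1_num).
	 */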
	switch (GET_FUNCT3(insn)) {
	case GET_FUNCT3(INSN_MATCH_CSRRW):
		wr_mask = -1UL;
		new_val = rs1_val;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRS):
		wr_mask = rs1_val;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRC):
		wr_mask = rs1_val;
		new_val = 0;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRWI):
		wr_mask = -1UL;
		new_val = rs1_num;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRSI):
		wr_mask = rs1_num;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRCI):
		wr_mask = rs1_num;
		new_val = 0;
		break;
	default:
		return rc;
	}

	/* Save instruction decode info */
	vcpu->arch.csr_decode.insn = insn;
	vcpu->arch.csr_decode.return_handled = 0;

	/* Update CSR details in kvm_run struct */
	run->riscv_csr.csr_num = csr_num;
	run->riscv_csr.new_value = new_val;
	run->riscv_csr.write_mask = wr_mask;
	run->riscv_csr.ret_value = 0;

	/* Find in-kernel CSR function */
	for (i = 0; i < ARRAY_SIZE(csr_funcs); i++) {
		tcfn = &csr_funcs[i];
		if ((tcfn->base <= csr_num) &&
		    (csr_num < (tcfn->base + tcfn->count))) {
			cfn = tcfn;
			break;
		}
	}

	/* First try in-kernel CSR emulation */
	if (cfn && cfn->func) {
		rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask);
		if (rc > KVM_INSN_EXIT_TO_USER_SPACE) {
			if (rc == KVM_INSN_CONTINUE_NEXT_SEPC) {
				run->riscv_csr.ret_value = val;
				vcpu->stat.csr_exit_kernel++;
				kvm_riscv_vcpu_csr_return(vcpu, run);
				rc = KVM_INSN_CONTINUE_SAME_SEPC;
			}
			return rc;
		}
	}

	/* Exit to user-space for CSR emulation */
	if (rc <= KVM_INSN_EXIT_TO_USER_SPACE) {
		vcpu->stat.csr_exit_user++;
		run->exit_reason = KVM_EXIT_RISCV_CSR;
	}

	return rc;
}

static const struct insn_func system_opcode_funcs[] = {
	{
		.mask  = INSN_MASK_CSRRW,
		.match = INSN_MATCH_CSRRW,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRS,
		.match = INSN_MATCH_CSRRS,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRC,
		.match = INSN_MATCH_CSRRC,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRWI,
		.match = INSN_MATCH_CSRRWI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRSI,
		.match = INSN_MATCH_CSRRSI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRCI,
		.match = INSN_MATCH_CSRRCI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_WFI,
		.match = INSN_MATCH_WFI,
		.func  = wfi_insn,
	},
};

static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	const struct insn_func *ifn;

	for (i = 0; i < ARRAY_SIZE(system_opcode_funcs); i++) {
		ifn = &system_opcode_funcs[i];
		if ((insn & ifn->mask) == ifn->match) {
			rc = ifn->func(vcpu, run, insn);
			break;
		}
	}

	switch (rc) {
	case KVM_INSN_ILLEGAL_TRAP:
		return truly_illegal_insn(vcpu, run, insn);
	case KVM_INSN_VIRTUAL_TRAP:
		return truly_virtual_insn(vcpu, run, insn);
	case KVM_INSN_CONTINUE_NEXT_SEPC:
		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
		break;
	default:
		break;
	}

	return (rc <= 0) ? rc : 1;
}

/**
 * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @trap: Trap details
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_cpu_trap *trap)
{
	unsigned long insn = trap->stval;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct;

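	/*
	 * stval may be zero when the CPU did not supply the trapping
	 * encoding, in which case the instruction has to be fetched
	 * from guest memory with an unprivileged read.
	 */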
	if (unlikely(INSN_IS_16BIT(insn))) {
		if (insn == 0) {
			ct = &vcpu->arch.guest_context;
			insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
							  ct->sepc,
							  &utrap);
			if (utrap.scause) {
				utrap.sepc = ct->sepc;
				kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
				return 1;
			}
		}
		if (INSN_IS_16BIT(insn))
			return truly_illegal_insn(vcpu, run, insn);
	}

	switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
	case INSN_OPCODE_SYSTEM:
		return system_opcode_insn(vcpu, run, insn);
	default:
		return truly_illegal_insn(vcpu, run, insn);
	}
}

/**
 * kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to load
 * @htinst: Transformed encoding of the load instruction
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     unsigned long fault_addr,
			     unsigned long htinst)
{
	u8 data_buf[8];
	unsigned long insn;
	int shift = 0, len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
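		/*
		 * Bit[1] distinguishes a trapped standard 32-bit
		 * instruction from the transformation of a compressed
		 * one: the transformed encoding is always 32-bit, but
		 * sepc must advance by only 2 bytes in the latter case.
		 */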
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

	/* Decode length of MMIO and shift */
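	/*
	 * "shift" records how the loaded value must be re-aligned when
	 * it is written to rd by kvm_riscv_vcpu_mmio_return(). For the
	 * compressed forms, the rs2'/rd' register number is rewritten
	 * into the regular rd field so the common SET_RD() path can be
	 * reused.
	 */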
	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
		len = 4;
#endif
	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
		len = 2;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
#endif
	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = shift;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = false;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
		/* Successfully handled MMIO access in the kernel so resume */
		memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

/**
 * kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to store
 * @htinst: Transformed encoding of the store instruction
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      unsigned long fault_addr,
			      unsigned long htinst)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong data;
	unsigned long insn;
	int len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

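	/*
	 * The store value comes from rs2 (or rs2'/rs2-at-sp for the
	 * compressed forms below); keep truncated copies for each
	 * access width.
	 */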
	data = GET_RS2(insn, &vcpu->arch.guest_context);
	data8 = data16 = data32 = data64 = data;

	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
		len = 4;
	} else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
		len = 1;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
		len = 8;
		data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
#endif
	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
		len = 4;
		data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = 0;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Copy data to kvm_run instance */
	switch (len) {
	case 1:
		*((u8 *)run->mmio.data) = data8;
		break;
	case 2:
		*((u16 *)run->mmio.data) = data16;
		break;
	case 4:
		*((u32 *)run->mmio.data) = data32;
		break;
	case 8:
		*((u64 *)run->mmio.data) = data64;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = true;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
			      fault_addr, len, run->mmio.data)) {
		/* Successfully handled MMIO access in the kernel so resume */
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

/**
 * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
 *				 or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 */
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong insn;
	int len, shift;

	if (vcpu->arch.mmio_decode.return_handled)
		return 0;

	vcpu->arch.mmio_decode.return_handled = 1;
	insn = vcpu->arch.mmio_decode.insn;

	if (run->mmio.is_write)
		goto done;

	len = vcpu->arch.mmio_decode.len;
	shift = vcpu->arch.mmio_decode.shift;

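	/*
	 * Copy the loaded bytes into rd, re-applying the shift that
	 * was recorded when the instruction was decoded in
	 * kvm_riscv_vcpu_mmio_load().
	 */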
	switch (len) {
	case 1:
		data8 = *((u8 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data8 << shift >> shift);
		break;
	case 2:
		data16 = *((u16 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data16 << shift >> shift);
		break;
	case 4:
		data32 = *((u32 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data32 << shift >> shift);
		break;
	case 8:
		data64 = *((u64 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data64 << shift >> shift);
		break;
	default:
		return -EOPNOTSUPP;
	}

done:
	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;

	return 0;
}