1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (C) 2020-2023 Loongson Technology Corporation Limited |
4 | */ |
5 | |
6 | #include <linux/kvm_host.h> |
7 | #include <linux/entry-kvm.h> |
8 | #include <asm/fpu.h> |
9 | #include <asm/loongarch.h> |
10 | #include <asm/setup.h> |
11 | #include <asm/time.h> |
12 | |
13 | #define CREATE_TRACE_POINTS |
14 | #include "trace.h" |
15 | |
16 | const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { |
17 | KVM_GENERIC_VCPU_STATS(), |
18 | STATS_DESC_COUNTER(VCPU, int_exits), |
19 | STATS_DESC_COUNTER(VCPU, idle_exits), |
20 | STATS_DESC_COUNTER(VCPU, cpucfg_exits), |
21 | STATS_DESC_COUNTER(VCPU, signal_exits), |
22 | }; |
23 | |
const struct kvm_stats_header kvm_vcpu_stats_header = {
25 | .name_size = KVM_STATS_NAME_SIZE, |
26 | .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc), |
27 | .id_offset = sizeof(struct kvm_stats_header), |
28 | .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, |
29 | .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + |
30 | sizeof(kvm_vcpu_stats_desc), |
31 | }; |
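
/*
 * Illustrative sketch (not produced by this file) of the binary stats
 * layout that the offsets above describe; the generic KVM stats code
 * emits this blob through the vCPU stats file descriptor:
 *
 *	offset 0:	struct kvm_stats_header
 *	id_offset:	NUL-terminated id string
 *	desc_offset:	kvm_vcpu_stats_desc[] descriptors
 *	data_offset:	one u64 value per descriptor
 */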
32 | |
33 | /* |
34 | * kvm_check_requests - check and handle pending vCPU requests |
35 | * |
36 | * Return: RESUME_GUEST if we should enter the guest |
37 | * RESUME_HOST if we should exit to userspace |
38 | */ |
39 | static int kvm_check_requests(struct kvm_vcpu *vcpu) |
40 | { |
41 | if (!kvm_request_pending(vcpu)) |
42 | return RESUME_GUEST; |
43 | |
44 | if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) |
45 | vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */ |
46 | |
47 | if (kvm_dirty_ring_check_request(vcpu)) |
48 | return RESUME_HOST; |
49 | |
50 | return RESUME_GUEST; |
51 | } |
52 | |
53 | /* |
 * Check and handle pending signals and vCPU requests etc.
 * Runs in process context with irqs enabled and preemption enabled.
56 | * |
57 | * Return: RESUME_GUEST if we should enter the guest |
58 | * RESUME_HOST if we should exit to userspace |
59 | * < 0 if we should exit to userspace, where the return value |
60 | * indicates an error |
61 | */ |
62 | static int kvm_enter_guest_check(struct kvm_vcpu *vcpu) |
63 | { |
64 | int ret; |
65 | |
66 | /* |
67 | * Check conditions before entering the guest |
68 | */ |
69 | ret = xfer_to_guest_mode_handle_work(vcpu); |
70 | if (ret < 0) |
71 | return ret; |
72 | |
73 | ret = kvm_check_requests(vcpu); |
74 | |
75 | return ret; |
76 | } |
77 | |
78 | /* |
79 | * Called with irq enabled |
80 | * |
81 | * Return: RESUME_GUEST if we should enter the guest, and irq disabled |
82 | * Others if we should exit to userspace |
83 | */ |
84 | static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu) |
85 | { |
86 | int ret; |
87 | |
88 | do { |
89 | ret = kvm_enter_guest_check(vcpu); |
90 | if (ret != RESUME_GUEST) |
91 | break; |
92 | |
		/*
		 * Handle the vcpu timer, deliver interrupts and exceptions,
		 * and check the vpid before the vcpu enters the guest
		 */
97 | local_irq_disable(); |
98 | kvm_deliver_intr(vcpu); |
99 | kvm_deliver_exception(vcpu); |
100 | /* Make sure the vcpu mode has been written */ |
101 | smp_store_mb(vcpu->mode, IN_GUEST_MODE); |
102 | kvm_check_vpid(vcpu); |
103 | vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY); |
		/* Clear KVM_LARCH_SWCSR_LATEST as CSR state will change when entering the guest */
105 | vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; |
106 | |
107 | if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) { |
			/* Make sure the vcpu mode has been written */
109 | smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE); |
110 | local_irq_enable(); |
111 | ret = -EAGAIN; |
112 | } |
113 | } while (ret != RESUME_GUEST); |
114 | |
115 | return ret; |
116 | } |
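
/*
 * A note on the ordering in kvm_pre_enter_guest(): smp_store_mb()
 * publishes IN_GUEST_MODE before the final pending-work recheck, while a
 * remote kvm_vcpu_kick() queues its request before checking the mode.
 * Whichever side loses the race, either the recheck sees the new work or
 * the kicker sees IN_GUEST_MODE and sends an IPI, so the guest is never
 * entered with work pending.
 */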
117 | |
118 | /* |
119 | * Return 1 for resume guest and "<= 0" for resume host. |
120 | */ |
121 | static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) |
122 | { |
123 | int ret = RESUME_GUEST; |
124 | unsigned long estat = vcpu->arch.host_estat; |
125 | u32 intr = estat & 0x1fff; /* Ignore NMI */ |
126 | u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; |
127 | |
128 | vcpu->mode = OUTSIDE_GUEST_MODE; |
129 | |
130 | /* Set a default exit reason */ |
131 | run->exit_reason = KVM_EXIT_UNKNOWN; |
132 | |
133 | guest_timing_exit_irqoff(); |
134 | guest_state_exit_irqoff(); |
135 | local_irq_enable(); |
136 | |
	trace_kvm_exit(vcpu, ecode);
138 | if (ecode) { |
139 | ret = kvm_handle_fault(vcpu, ecode); |
140 | } else { |
		WARN(!intr, "vm exiting with suspicious irq\n");
142 | ++vcpu->stat.int_exits; |
143 | } |
144 | |
145 | if (ret == RESUME_GUEST) |
146 | ret = kvm_pre_enter_guest(vcpu); |
147 | |
148 | if (ret != RESUME_GUEST) { |
149 | local_irq_disable(); |
150 | return ret; |
151 | } |
152 | |
153 | guest_timing_enter_irqoff(); |
154 | guest_state_enter_irqoff(); |
155 | trace_kvm_reenter(vcpu); |
156 | |
157 | return RESUME_GUEST; |
158 | } |
159 | |
160 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) |
161 | { |
162 | return !!(vcpu->arch.irq_pending) && |
163 | vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE; |
164 | } |
165 | |
166 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) |
167 | { |
168 | return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; |
169 | } |
170 | |
171 | bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) |
172 | { |
173 | return false; |
174 | } |
175 | |
176 | vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) |
177 | { |
178 | return VM_FAULT_SIGBUS; |
179 | } |
180 | |
181 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
182 | struct kvm_translation *tr) |
183 | { |
184 | return -EINVAL; |
185 | } |
186 | |
187 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) |
188 | { |
189 | int ret; |
190 | |
191 | /* Protect from TOD sync and vcpu_load/put() */ |
192 | preempt_disable(); |
193 | ret = kvm_pending_timer(vcpu) || |
194 | kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI); |
195 | preempt_enable(); |
196 | |
197 | return ret; |
198 | } |
199 | |
200 | int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) |
201 | { |
202 | int i; |
203 | |
	kvm_debug("vCPU Register Dump:\n");
	kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);
207 | |
208 | for (i = 0; i < 32; i += 4) { |
		kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
210 | vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1], |
211 | vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); |
212 | } |
213 | |
	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
215 | kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD), |
216 | kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT)); |
217 | |
	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));
219 | |
220 | return 0; |
221 | } |
222 | |
223 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
224 | struct kvm_mp_state *mp_state) |
225 | { |
226 | *mp_state = vcpu->arch.mp_state; |
227 | |
228 | return 0; |
229 | } |
230 | |
231 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
232 | struct kvm_mp_state *mp_state) |
233 | { |
234 | int ret = 0; |
235 | |
236 | switch (mp_state->mp_state) { |
237 | case KVM_MP_STATE_RUNNABLE: |
238 | vcpu->arch.mp_state = *mp_state; |
239 | break; |
240 | default: |
241 | ret = -EINVAL; |
242 | } |
243 | |
244 | return ret; |
245 | } |
246 | |
247 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
248 | struct kvm_guest_debug *dbg) |
249 | { |
250 | return -EINVAL; |
251 | } |
252 | |
253 | static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) |
254 | { |
255 | unsigned long gintc; |
256 | struct loongarch_csrs *csr = vcpu->arch.csr; |
257 | |
258 | if (get_gcsr_flag(id) & INVALID_GCSR) |
259 | return -EINVAL; |
260 | |
261 | if (id == LOONGARCH_CSR_ESTAT) { |
		/* ESTAT IP0~IP7 are read from GINTC */
263 | gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff; |
264 | *val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2); |
265 | return 0; |
266 | } |
267 | |
268 | /* |
269 | * Get software CSR state since software state is consistent |
270 | * with hardware for synchronous ioctl |
271 | */ |
272 | *val = kvm_read_sw_gcsr(csr, id); |
273 | |
274 | return 0; |
275 | } |
276 | |
277 | static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) |
278 | { |
279 | int ret = 0, gintc; |
280 | struct loongarch_csrs *csr = vcpu->arch.csr; |
281 | |
282 | if (get_gcsr_flag(id) & INVALID_GCSR) |
283 | return -EINVAL; |
284 | |
285 | if (id == LOONGARCH_CSR_ESTAT) { |
		/* ESTAT IP0~IP7 are injected through GINTC */
287 | gintc = (val >> 2) & 0xff; |
288 | kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc); |
289 | |
290 | gintc = val & ~(0xffUL << 2); |
291 | kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc); |
292 | |
293 | return ret; |
294 | } |
295 | |
296 | kvm_write_sw_gcsr(csr, id, val); |
297 | |
298 | return ret; |
299 | } |
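
/*
 * Worked example for the ESTAT/GINTC split above (illustrative values):
 * setting ESTAT = 0x0c, i.e. bits 2-3 (IP0-IP1), stores
 * (0x0c >> 2) & 0xff = 0x03 into the soft GINTC copy and
 * 0x0c & ~(0xff << 2) = 0 into the soft ESTAT copy; _kvm_getcsr()
 * reverses this by OR-ing GINTC << 2 back into the returned ESTAT value.
 */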
300 | |
301 | static int _kvm_get_cpucfg_mask(int id, u64 *v) |
302 | { |
303 | if (id < 0 || id >= KVM_MAX_CPUCFG_REGS) |
304 | return -EINVAL; |
305 | |
306 | switch (id) { |
307 | case LOONGARCH_CPUCFG0: |
308 | *v = GENMASK(31, 0); |
309 | return 0; |
310 | case LOONGARCH_CPUCFG1: |
311 | /* CPUCFG1_MSGINT is not supported by KVM */ |
312 | *v = GENMASK(25, 0); |
313 | return 0; |
314 | case LOONGARCH_CPUCFG2: |
315 | /* CPUCFG2 features unconditionally supported by KVM */ |
316 | *v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP | |
317 | CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV | |
318 | CPUCFG2_LSPW | CPUCFG2_LAM; |
319 | /* |
320 | * For the ISA extensions listed below, if one is supported |
321 | * by the host, then it is also supported by KVM. |
322 | */ |
323 | if (cpu_has_lsx) |
324 | *v |= CPUCFG2_LSX; |
325 | if (cpu_has_lasx) |
326 | *v |= CPUCFG2_LASX; |
327 | |
328 | return 0; |
329 | case LOONGARCH_CPUCFG3: |
330 | *v = GENMASK(16, 0); |
331 | return 0; |
332 | case LOONGARCH_CPUCFG4: |
333 | case LOONGARCH_CPUCFG5: |
334 | *v = GENMASK(31, 0); |
335 | return 0; |
336 | case LOONGARCH_CPUCFG16: |
337 | *v = GENMASK(16, 0); |
338 | return 0; |
339 | case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20: |
340 | *v = GENMASK(30, 0); |
341 | return 0; |
342 | default: |
343 | /* |
344 | * CPUCFG bits should be zero if reserved by HW or not |
345 | * supported by KVM. |
346 | */ |
347 | *v = 0; |
348 | return 0; |
349 | } |
350 | } |
351 | |
352 | static int kvm_check_cpucfg(int id, u64 val) |
353 | { |
354 | int ret; |
355 | u64 mask = 0; |
356 | |
	ret = _kvm_get_cpucfg_mask(id, &mask);
358 | if (ret) |
359 | return ret; |
360 | |
361 | if (val & ~mask) |
362 | /* Unsupported features and/or the higher 32 bits should not be set */ |
363 | return -EINVAL; |
364 | |
365 | switch (id) { |
366 | case LOONGARCH_CPUCFG2: |
367 | if (!(val & CPUCFG2_LLFTP)) |
368 | /* Guests must have a constant timer */ |
369 | return -EINVAL; |
370 | if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP))) |
			/* Single- and double-precision float must both be set when FP is enabled */
372 | return -EINVAL; |
373 | if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP)) |
374 | /* LSX architecturally implies FP but val does not satisfy that */ |
375 | return -EINVAL; |
376 | if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX)) |
377 | /* LASX architecturally implies LSX and FP but val does not satisfy that */ |
378 | return -EINVAL; |
379 | return 0; |
380 | default: |
381 | /* |
382 | * Values for the other CPUCFG IDs are not being further validated |
383 | * besides the mask check above. |
384 | */ |
385 | return 0; |
386 | } |
387 | } |
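
/*
 * Example of the CPUCFG2 dependency rules above (illustrative): a value
 * with CPUCFG2_LSX set but CPUCFG2_FP clear fails with -EINVAL because
 * LSX architecturally implies FP, as does CPUCFG2_LASX without
 * CPUCFG2_LSX, or any value with CPUCFG2_LLFTP clear.
 */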
388 | |
389 | static int kvm_get_one_reg(struct kvm_vcpu *vcpu, |
390 | const struct kvm_one_reg *reg, u64 *v) |
391 | { |
392 | int id, ret = 0; |
393 | u64 type = reg->id & KVM_REG_LOONGARCH_MASK; |
394 | |
395 | switch (type) { |
396 | case KVM_REG_LOONGARCH_CSR: |
397 | id = KVM_GET_IOC_CSR_IDX(reg->id); |
		ret = _kvm_getcsr(vcpu, id, v);
399 | break; |
400 | case KVM_REG_LOONGARCH_CPUCFG: |
401 | id = KVM_GET_IOC_CPUCFG_IDX(reg->id); |
402 | if (id >= 0 && id < KVM_MAX_CPUCFG_REGS) |
403 | *v = vcpu->arch.cpucfg[id]; |
404 | else |
405 | ret = -EINVAL; |
406 | break; |
407 | case KVM_REG_LOONGARCH_KVM: |
408 | switch (reg->id) { |
409 | case KVM_REG_LOONGARCH_COUNTER: |
410 | *v = drdtime() + vcpu->kvm->arch.time_offset; |
411 | break; |
412 | default: |
413 | ret = -EINVAL; |
414 | break; |
415 | } |
416 | break; |
417 | default: |
418 | ret = -EINVAL; |
419 | break; |
420 | } |
421 | |
422 | return ret; |
423 | } |
424 | |
425 | static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) |
426 | { |
427 | int ret = 0; |
428 | u64 v, size = reg->id & KVM_REG_SIZE_MASK; |
429 | |
430 | switch (size) { |
431 | case KVM_REG_SIZE_U64: |
		ret = kvm_get_one_reg(vcpu, reg, &v);
433 | if (ret) |
434 | return ret; |
435 | ret = put_user(v, (u64 __user *)(long)reg->addr); |
436 | break; |
437 | default: |
438 | ret = -EINVAL; |
439 | break; |
440 | } |
441 | |
442 | return ret; |
443 | } |
444 | |
445 | static int kvm_set_one_reg(struct kvm_vcpu *vcpu, |
446 | const struct kvm_one_reg *reg, u64 v) |
447 | { |
448 | int id, ret = 0; |
449 | u64 type = reg->id & KVM_REG_LOONGARCH_MASK; |
450 | |
451 | switch (type) { |
452 | case KVM_REG_LOONGARCH_CSR: |
453 | id = KVM_GET_IOC_CSR_IDX(reg->id); |
		ret = _kvm_setcsr(vcpu, id, v);
455 | break; |
456 | case KVM_REG_LOONGARCH_CPUCFG: |
457 | id = KVM_GET_IOC_CPUCFG_IDX(reg->id); |
		ret = kvm_check_cpucfg(id, v);
459 | if (ret) |
460 | break; |
461 | vcpu->arch.cpucfg[id] = (u32)v; |
462 | break; |
463 | case KVM_REG_LOONGARCH_KVM: |
464 | switch (reg->id) { |
465 | case KVM_REG_LOONGARCH_COUNTER: |
			/*
			 * The counter offset applies to the whole board,
			 * not to a single vCPU; only set it once, through
			 * vCPU 0, on SMP systems.
			 */
470 | if (vcpu->vcpu_id == 0) |
471 | vcpu->kvm->arch.time_offset = (signed long)(v - drdtime()); |
472 | break; |
473 | case KVM_REG_LOONGARCH_VCPU_RESET: |
474 | kvm_reset_timer(vcpu); |
475 | memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); |
476 | memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); |
477 | break; |
478 | default: |
479 | ret = -EINVAL; |
480 | break; |
481 | } |
482 | break; |
483 | default: |
484 | ret = -EINVAL; |
485 | break; |
486 | } |
487 | |
488 | return ret; |
489 | } |
490 | |
491 | static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) |
492 | { |
493 | int ret = 0; |
494 | u64 v, size = reg->id & KVM_REG_SIZE_MASK; |
495 | |
496 | switch (size) { |
497 | case KVM_REG_SIZE_U64: |
498 | ret = get_user(v, (u64 __user *)(long)reg->addr); |
499 | if (ret) |
500 | return ret; |
501 | break; |
502 | default: |
503 | return -EINVAL; |
504 | } |
505 | |
506 | return kvm_set_one_reg(vcpu, reg, v); |
507 | } |
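
/*
 * Hypothetical userspace sketch of the ONE_REG paths above (not part of
 * this file; the exact id encoding is defined in the uapi headers):
 *
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 |
 *			LOONGARCH_CSR_ESTAT,
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	- routed to _kvm_getcsr()
 */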
508 | |
509 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) |
510 | { |
511 | return -ENOIOCTLCMD; |
512 | } |
513 | |
514 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) |
515 | { |
516 | return -ENOIOCTLCMD; |
517 | } |
518 | |
519 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
520 | { |
521 | int i; |
522 | |
523 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) |
524 | regs->gpr[i] = vcpu->arch.gprs[i]; |
525 | |
526 | regs->pc = vcpu->arch.pc; |
527 | |
528 | return 0; |
529 | } |
530 | |
531 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
532 | { |
533 | int i; |
534 | |
535 | for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) |
536 | vcpu->arch.gprs[i] = regs->gpr[i]; |
537 | |
538 | vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ |
539 | vcpu->arch.pc = regs->pc; |
540 | |
541 | return 0; |
542 | } |
543 | |
544 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, |
545 | struct kvm_enable_cap *cap) |
546 | { |
	/* FPU is enabled by default; LSX/LASX support will be added later. */
548 | return -EINVAL; |
549 | } |
550 | |
551 | static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu, |
552 | struct kvm_device_attr *attr) |
553 | { |
554 | switch (attr->attr) { |
	case LOONGARCH_CPUCFG2:
556 | return 0; |
557 | default: |
558 | return -ENXIO; |
559 | } |
560 | |
561 | return -ENXIO; |
562 | } |
563 | |
564 | static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu, |
565 | struct kvm_device_attr *attr) |
566 | { |
567 | int ret = -ENXIO; |
568 | |
569 | switch (attr->group) { |
570 | case KVM_LOONGARCH_VCPU_CPUCFG: |
571 | ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr); |
572 | break; |
573 | default: |
574 | break; |
575 | } |
576 | |
577 | return ret; |
578 | } |
579 | |
580 | static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu, |
581 | struct kvm_device_attr *attr) |
582 | { |
583 | int ret = 0; |
584 | uint64_t val; |
585 | uint64_t __user *uaddr = (uint64_t __user *)attr->addr; |
586 | |
	ret = _kvm_get_cpucfg_mask(attr->attr, &val);
588 | if (ret) |
589 | return ret; |
590 | |
	ret = put_user(val, uaddr);
592 | |
593 | return ret; |
594 | } |
595 | |
596 | static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu, |
597 | struct kvm_device_attr *attr) |
598 | { |
599 | int ret = -ENXIO; |
600 | |
601 | switch (attr->group) { |
602 | case KVM_LOONGARCH_VCPU_CPUCFG: |
603 | ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr); |
604 | break; |
605 | default: |
606 | break; |
607 | } |
608 | |
609 | return ret; |
610 | } |
611 | |
612 | static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu, |
613 | struct kvm_device_attr *attr) |
614 | { |
615 | return -ENXIO; |
616 | } |
617 | |
618 | static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu, |
619 | struct kvm_device_attr *attr) |
620 | { |
621 | int ret = -ENXIO; |
622 | |
623 | switch (attr->group) { |
624 | case KVM_LOONGARCH_VCPU_CPUCFG: |
625 | ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr); |
626 | break; |
627 | default: |
628 | break; |
629 | } |
630 | |
631 | return ret; |
632 | } |
633 | |
634 | long kvm_arch_vcpu_ioctl(struct file *filp, |
635 | unsigned int ioctl, unsigned long arg) |
636 | { |
637 | long r; |
638 | struct kvm_device_attr attr; |
639 | void __user *argp = (void __user *)arg; |
640 | struct kvm_vcpu *vcpu = filp->private_data; |
641 | |
	/*
	 * Only software CSR state should be modified here.
	 *
	 * If any hardware CSR register were modified directly, a
	 * vcpu_load/vcpu_put pair would be needed: the hardware CSR
	 * registers are owned by the current vCPU, so a vCPU switched in
	 * later would have to reload its CSR registers.
	 *
	 * When software CSR state is modified, the KVM_LARCH_HWCSR_USABLE
	 * bit must be cleared in vcpu->arch.aux_inuse, so that vcpu_load
	 * sees the flag and reloads the hardware CSR registers from the
	 * software copy.
	 */
653 | |
654 | switch (ioctl) { |
655 | case KVM_SET_ONE_REG: |
656 | case KVM_GET_ONE_REG: { |
657 | struct kvm_one_reg reg; |
658 | |
659 | r = -EFAULT; |
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG) {
			r = kvm_set_reg(vcpu, &reg);
			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
		} else {
			r = kvm_get_reg(vcpu, &reg);
		}
667 | break; |
668 | } |
669 | case KVM_ENABLE_CAP: { |
670 | struct kvm_enable_cap cap; |
671 | |
672 | r = -EFAULT; |
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
676 | break; |
677 | } |
678 | case KVM_HAS_DEVICE_ATTR: { |
679 | r = -EFAULT; |
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
683 | break; |
684 | } |
685 | case KVM_GET_DEVICE_ATTR: { |
686 | r = -EFAULT; |
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
690 | break; |
691 | } |
692 | case KVM_SET_DEVICE_ATTR: { |
693 | r = -EFAULT; |
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
697 | break; |
698 | } |
699 | default: |
700 | r = -ENOIOCTLCMD; |
701 | break; |
702 | } |
703 | |
704 | return r; |
705 | } |
706 | |
707 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
708 | { |
709 | int i = 0; |
710 | |
711 | fpu->fcc = vcpu->arch.fpu.fcc; |
712 | fpu->fcsr = vcpu->arch.fpu.fcsr; |
713 | for (i = 0; i < NUM_FPU_REGS; i++) |
714 | memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64); |
715 | |
716 | return 0; |
717 | } |
718 | |
719 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
720 | { |
721 | int i = 0; |
722 | |
723 | vcpu->arch.fpu.fcc = fpu->fcc; |
724 | vcpu->arch.fpu.fcsr = fpu->fcsr; |
725 | for (i = 0; i < NUM_FPU_REGS; i++) |
726 | memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64); |
727 | |
728 | return 0; |
729 | } |
730 | |
731 | /* Enable FPU and restore context */ |
732 | void kvm_own_fpu(struct kvm_vcpu *vcpu) |
733 | { |
734 | preempt_disable(); |
735 | |
736 | /* Enable FPU */ |
737 | set_csr_euen(CSR_EUEN_FPEN); |
738 | |
739 | kvm_restore_fpu(&vcpu->arch.fpu); |
740 | vcpu->arch.aux_inuse |= KVM_LARCH_FPU; |
741 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU); |
742 | |
743 | preempt_enable(); |
744 | } |
745 | |
746 | #ifdef CONFIG_CPU_HAS_LSX |
747 | /* Enable LSX and restore context */ |
748 | int kvm_own_lsx(struct kvm_vcpu *vcpu) |
749 | { |
750 | if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch)) |
751 | return -EINVAL; |
752 | |
753 | preempt_disable(); |
754 | |
755 | /* Enable LSX for guest */ |
756 | set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN); |
757 | switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { |
758 | case KVM_LARCH_FPU: |
759 | /* |
760 | * Guest FPU state already loaded, |
761 | * only restore upper LSX state |
762 | */ |
763 | _restore_lsx_upper(&vcpu->arch.fpu); |
764 | break; |
	default:
		/*
		 * Neither FP nor LSX already active,
		 * restore full LSX state
		 */
769 | kvm_restore_lsx(&vcpu->arch.fpu); |
770 | break; |
771 | } |
772 | |
773 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX); |
774 | vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU; |
775 | preempt_enable(); |
776 | |
777 | return 0; |
778 | } |
779 | #endif |
780 | |
781 | #ifdef CONFIG_CPU_HAS_LASX |
782 | /* Enable LASX and restore context */ |
783 | int kvm_own_lasx(struct kvm_vcpu *vcpu) |
784 | { |
785 | if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch)) |
786 | return -EINVAL; |
787 | |
788 | preempt_disable(); |
789 | |
790 | set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN); |
791 | switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { |
792 | case KVM_LARCH_LSX: |
793 | case KVM_LARCH_LSX | KVM_LARCH_FPU: |
794 | /* Guest LSX state already loaded, only restore upper LASX state */ |
795 | _restore_lasx_upper(&vcpu->arch.fpu); |
796 | break; |
797 | case KVM_LARCH_FPU: |
798 | /* Guest FP state already loaded, only restore upper LSX & LASX state */ |
799 | _restore_lsx_upper(&vcpu->arch.fpu); |
800 | _restore_lasx_upper(&vcpu->arch.fpu); |
801 | break; |
802 | default: |
		/* Neither FP nor LSX already active, restore full LASX state */
804 | kvm_restore_lasx(&vcpu->arch.fpu); |
805 | break; |
806 | } |
807 | |
808 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX); |
809 | vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU; |
810 | preempt_enable(); |
811 | |
812 | return 0; |
813 | } |
814 | #endif |
815 | |
816 | /* Save context and disable FPU */ |
817 | void kvm_lose_fpu(struct kvm_vcpu *vcpu) |
818 | { |
819 | preempt_disable(); |
820 | |
821 | if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) { |
822 | kvm_save_lasx(&vcpu->arch.fpu); |
823 | vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX); |
824 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX); |
825 | |
826 | /* Disable LASX & LSX & FPU */ |
827 | clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN); |
828 | } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) { |
829 | kvm_save_lsx(&vcpu->arch.fpu); |
830 | vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU); |
831 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX); |
832 | |
833 | /* Disable LSX & FPU */ |
834 | clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN); |
835 | } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { |
836 | kvm_save_fpu(&vcpu->arch.fpu); |
837 | vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU; |
838 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); |
839 | |
840 | /* Disable FPU */ |
841 | clear_csr_euen(CSR_EUEN_FPEN); |
842 | } |
843 | |
844 | preempt_enable(); |
845 | } |
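
/*
 * The aux_inuse bits above implement lazy FPU/SIMD context switching. A
 * typical lifetime: the guest executes an FP instruction, the resulting
 * exception handler calls kvm_own_fpu() (KVM_LARCH_FPU set, context
 * restored); later kvm_arch_vcpu_put() reaches kvm_lose_fpu(), which
 * saves the widest live context (LASX > LSX > FPU), clears the bits and
 * disables the units again.
 */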
846 | |
847 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) |
848 | { |
849 | int intr = (int)irq->irq; |
850 | |
	if (intr > 0) {
		kvm_queue_irq(vcpu, intr);
	} else if (intr < 0) {
		kvm_dequeue_irq(vcpu, -intr);
	} else {
		kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
		return -EINVAL;
	}
859 | |
860 | kvm_vcpu_kick(vcpu); |
861 | |
862 | return 0; |
863 | } |
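
/*
 * Hypothetical userspace view of the sign convention above (a sketch,
 * not part of this file): a positive irq queues the interrupt line and
 * a negative one dequeues it, e.g.
 *
 *	struct kvm_interrupt irq = { .irq = 3 };	- queue line 3
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
 *	irq.irq = -3;					- dequeue line 3
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
 */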
864 | |
865 | long kvm_arch_vcpu_async_ioctl(struct file *filp, |
866 | unsigned int ioctl, unsigned long arg) |
867 | { |
868 | void __user *argp = (void __user *)arg; |
869 | struct kvm_vcpu *vcpu = filp->private_data; |
870 | |
871 | if (ioctl == KVM_INTERRUPT) { |
872 | struct kvm_interrupt irq; |
873 | |
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
880 | } |
881 | |
882 | return -ENOIOCTLCMD; |
883 | } |
884 | |
885 | int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) |
886 | { |
887 | return 0; |
888 | } |
889 | |
890 | int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) |
891 | { |
892 | unsigned long timer_hz; |
893 | struct loongarch_csrs *csr; |
894 | |
895 | vcpu->arch.vpid = 0; |
896 | |
	hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
898 | vcpu->arch.swtimer.function = kvm_swtimer_wakeup; |
899 | |
900 | vcpu->arch.handle_exit = kvm_handle_exit; |
901 | vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry; |
902 | vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL); |
903 | if (!vcpu->arch.csr) |
904 | return -ENOMEM; |
905 | |
	/*
	 * All kvm exceptions share one exception entry, and host <-> guest
	 * switch also switches the ECFG.VS field, so keep the host ECFG.VS
	 * info here.
	 */
910 | vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS); |
911 | |
912 | /* Init */ |
913 | vcpu->arch.last_sched_cpu = -1; |
914 | |
915 | /* |
916 | * Initialize guest register state to valid architectural reset state. |
917 | */ |
918 | timer_hz = calc_const_freq(); |
919 | kvm_init_timer(vcpu, timer_hz); |
920 | |
	/* Start the guest in direct address (DA) mode */
922 | csr = vcpu->arch.csr; |
923 | kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA); |
924 | |
925 | /* Set cpuid */ |
926 | kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id); |
927 | |
928 | /* Start with no pending virtual guest interrupts */ |
929 | csr->csrs[LOONGARCH_CSR_GINTC] = 0; |
930 | |
931 | return 0; |
932 | } |
933 | |
934 | void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) |
935 | { |
936 | } |
937 | |
938 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) |
939 | { |
940 | int cpu; |
941 | struct kvm_context *context; |
942 | |
	hrtimer_cancel(&vcpu->arch.swtimer);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kfree(vcpu->arch.csr);
946 | |
947 | /* |
948 | * If the vCPU is freed and reused as another vCPU, we don't want the |
949 | * matching pointer wrongly hanging around in last_vcpu. |
950 | */ |
951 | for_each_possible_cpu(cpu) { |
952 | context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); |
953 | if (context->last_vcpu == vcpu) |
954 | context->last_vcpu = NULL; |
955 | } |
956 | } |
957 | |
958 | static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
959 | { |
960 | bool migrated; |
961 | struct kvm_context *context; |
962 | struct loongarch_csrs *csr = vcpu->arch.csr; |
963 | |
964 | /* |
965 | * Have we migrated to a different CPU? |
966 | * If so, any old guest TLB state may be stale. |
967 | */ |
968 | migrated = (vcpu->arch.last_sched_cpu != cpu); |
969 | |
970 | /* |
971 | * Was this the last vCPU to run on this CPU? |
972 | * If not, any old guest state from this vCPU will have been clobbered. |
973 | */ |
974 | context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); |
975 | if (migrated || (context->last_vcpu != vcpu)) |
976 | vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; |
977 | context->last_vcpu = vcpu; |
978 | |
979 | /* Restore timer state regardless */ |
980 | kvm_restore_timer(vcpu); |
981 | |
982 | /* Control guest page CCA attribute */ |
983 | change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT); |
984 | |
985 | /* Don't bother restoring registers multiple times unless necessary */ |
986 | if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE) |
987 | return 0; |
988 | |
989 | write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset); |
990 | |
991 | /* Restore guest CSR registers */ |
992 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD); |
993 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD); |
994 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN); |
995 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC); |
996 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG); |
997 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA); |
998 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV); |
999 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI); |
1000 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY); |
1001 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX); |
1002 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI); |
1003 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0); |
1004 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1); |
1005 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID); |
1006 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL); |
1007 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH); |
1008 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0); |
1009 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1); |
1010 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE); |
1011 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG); |
1012 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID); |
1013 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0); |
1014 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1); |
1015 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2); |
1016 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3); |
1017 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4); |
1018 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5); |
1019 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6); |
1020 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7); |
1021 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID); |
1022 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC); |
1023 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY); |
1024 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV); |
1025 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA); |
1026 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE); |
1027 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0); |
1028 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1); |
1029 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI); |
1030 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD); |
1031 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0); |
1032 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1); |
1033 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2); |
1034 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3); |
1035 | kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL); |
1036 | |
1037 | /* Restore Root.GINTC from unused Guest.GINTC register */ |
1038 | write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]); |
1039 | |
1040 | /* |
1041 | * We should clear linked load bit to break interrupted atomics. This |
1042 | * prevents a SC on the next vCPU from succeeding by matching a LL on |
1043 | * the previous vCPU. |
1044 | */ |
1045 | if (vcpu->kvm->created_vcpus > 1) |
1046 | set_gcsr_llbctl(CSR_LLBCTL_WCLLB); |
1047 | |
1048 | vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE; |
1049 | |
1050 | return 0; |
1051 | } |
1052 | |
1053 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
1054 | { |
1055 | unsigned long flags; |
1056 | |
1057 | local_irq_save(flags); |
1058 | /* Restore guest state to registers */ |
1059 | _kvm_vcpu_load(vcpu, cpu); |
1060 | local_irq_restore(flags); |
1061 | } |
1062 | |
1063 | static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu) |
1064 | { |
1065 | struct loongarch_csrs *csr = vcpu->arch.csr; |
1066 | |
1067 | kvm_lose_fpu(vcpu); |
1068 | |
	/*
	 * Update the software CSR state from hardware if it is stale. Most
	 * CSR registers are kept unchanged across a process context switch,
	 * except for registers such as the remaining timer tick value and
	 * the injected interrupt state.
	 */
1075 | if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST) |
1076 | goto out; |
1077 | |
1078 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD); |
1079 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD); |
1080 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN); |
1081 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC); |
1082 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG); |
1083 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA); |
1084 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV); |
1085 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI); |
1086 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY); |
1087 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX); |
1088 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI); |
1089 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0); |
1090 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1); |
1091 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID); |
1092 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL); |
1093 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH); |
1094 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0); |
1095 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1); |
1096 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE); |
1097 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG); |
1098 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID); |
1099 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1); |
1100 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2); |
1101 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3); |
1102 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0); |
1103 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1); |
1104 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2); |
1105 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3); |
1106 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4); |
1107 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5); |
1108 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6); |
1109 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7); |
1110 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID); |
1111 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC); |
1112 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL); |
1113 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY); |
1114 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV); |
1115 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA); |
1116 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE); |
1117 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0); |
1118 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1); |
1119 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI); |
1120 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD); |
1121 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0); |
1122 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1); |
1123 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2); |
1124 | kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3); |
1125 | |
1126 | vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST; |
1127 | |
1128 | out: |
1129 | kvm_save_timer(vcpu); |
1130 | /* Save Root.GINTC into unused Guest.GINTC register */ |
1131 | csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc(); |
1132 | |
1133 | return 0; |
1134 | } |
1135 | |
1136 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) |
1137 | { |
1138 | int cpu; |
1139 | unsigned long flags; |
1140 | |
1141 | local_irq_save(flags); |
1142 | cpu = smp_processor_id(); |
1143 | vcpu->arch.last_sched_cpu = cpu; |
1144 | |
1145 | /* Save guest state in registers */ |
1146 | _kvm_vcpu_put(vcpu, cpu); |
1147 | local_irq_restore(flags); |
1148 | } |
1149 | |
1150 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) |
1151 | { |
1152 | int r = -EINTR; |
1153 | struct kvm_run *run = vcpu->run; |
1154 | |
1155 | if (vcpu->mmio_needed) { |
1156 | if (!vcpu->mmio_is_write) |
1157 | kvm_complete_mmio_read(vcpu, run); |
1158 | vcpu->mmio_needed = 0; |
1159 | } |
1160 | |
1161 | if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) { |
1162 | if (!run->iocsr_io.is_write) |
1163 | kvm_complete_iocsr_read(vcpu, run); |
1164 | } |
1165 | |
1166 | if (run->immediate_exit) |
1167 | return r; |
1168 | |
1169 | /* Clear exit_reason */ |
1170 | run->exit_reason = KVM_EXIT_UNKNOWN; |
1171 | lose_fpu(1); |
1172 | vcpu_load(vcpu); |
1173 | kvm_sigset_activate(vcpu); |
1174 | r = kvm_pre_enter_guest(vcpu); |
1175 | if (r != RESUME_GUEST) |
1176 | goto out; |
1177 | |
1178 | guest_timing_enter_irqoff(); |
1179 | guest_state_enter_irqoff(); |
1180 | trace_kvm_enter(vcpu); |
1181 | r = kvm_loongarch_ops->enter_guest(run, vcpu); |
1182 | |
1183 | trace_kvm_out(vcpu); |
	/*
	 * Guest exit is already recorded at kvm_handle_exit(); the return
	 * value must not be RESUME_GUEST here.
	 */
1188 | local_irq_enable(); |
1189 | out: |
1190 | kvm_sigset_deactivate(vcpu); |
1191 | vcpu_put(vcpu); |
1192 | |
1193 | return r; |
1194 | } |
1195 | |