/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/loongarch.h>
#include <asm/regdef.h>
#include <asm/unwind_hints.h>

#define HGPR_OFFSET(x)		(PT_R0 + 8*x)
#define GGPR_OFFSET(x)		(KVM_ARCH_GGPR + 8*x)
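/*
 * Host GPRs are saved into a pt_regs frame, guest GPRs into the GPR
 * array in kvm_vcpu_arch. kvm_enter_guest() is an ordinary function
 * call, so only ra/tp/sp ($r1-$r3) and the callee-saved $r22-$r31 need
 * to be preserved for the host; the remaining GPRs are caller-saved,
 * and $r21 (u0, the per-CPU base) is handled separately below via
 * KVM_ARCH_HPERCPU.
 */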

.macro kvm_save_host_gpr base
	.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
	st.d	$r\n, \base, HGPR_OFFSET(\n)
	.endr
.endm

.macro kvm_restore_host_gpr base
	.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
	ld.d	$r\n, \base, HGPR_OFFSET(\n)
	.endr
.endm

/*
 * Save and restore all guest GPRs except the base register;
 * the base register defaults to a2.
 */
.macro kvm_save_guest_gprs base
	.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
	st.d	$r\n, \base, GGPR_OFFSET(\n)
	.endr
.endm

.macro kvm_restore_guest_gprs base
	.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
	ld.d	$r\n, \base, GGPR_OFFSET(\n)
	.endr
.endm

/*
 * Prepare the switch to guest: save host regs and restore guest regs.
 * a2: kvm_vcpu_arch, must not be touched until 'ertn'
 * t0, t1: temp registers
 */
.macro kvm_switch_to_guest
	/* Set host ECFG.VS=0, all exceptions share one exception entry */
	csrrd	t0, LOONGARCH_CSR_ECFG
	bstrins.w	t0, zero, CSR_ECFG_VS_SHIFT_END, CSR_ECFG_VS_SHIFT
	csrwr	t0, LOONGARCH_CSR_ECFG

	/* Load up the new EENTRY */
	ld.d	t0, a2, KVM_ARCH_GEENTRY
	csrwr	t0, LOONGARCH_CSR_EENTRY

	/* Set Guest ERA */
	ld.d	t0, a2, KVM_ARCH_GPC
	csrwr	t0, LOONGARCH_CSR_ERA

	/* Save host PGDL */
	csrrd	t0, LOONGARCH_CSR_PGDL
	st.d	t0, a2, KVM_ARCH_HPGD

	/* Load the struct kvm pointer from the enclosing kvm_vcpu */
	ld.d	t1, a2, KVM_VCPU_KVM - KVM_VCPU_ARCH

	/* Load guest PGDL */
	li.w	t0, KVM_GPGD
	ldx.d	t0, t1, t0
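	/* indexed load: the KVM_GPGD offset may not fit in ld.d's signed 12-bit immediate */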
	csrwr	t0, LOONGARCH_CSR_PGDL

	/* Mix GID and RID */
	csrrd	t1, LOONGARCH_CSR_GSTAT
	bstrpick.w	t1, t1, CSR_GSTAT_GID_SHIFT_END, CSR_GSTAT_GID_SHIFT
	csrrd	t0, LOONGARCH_CSR_GTLBC
	bstrins.w	t0, t1, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
	csrwr	t0, LOONGARCH_CSR_GTLBC

	/*
	 * Enable interrupts in root mode with the coming ertn so that
	 * host interrupts can be serviced while the VM runs.
	 * Guest CRMD comes from the separate GCSR_CRMD register.
	 */
	ori	t0, zero, CSR_PRMD_PIE
	csrxchg	t0, t0, LOONGARCH_CSR_PRMD

	/* Set the PVM bit so that ertn enters guest context */
	ori	t0, zero, CSR_GSTAT_PVM
	csrxchg	t0, t0, LOONGARCH_CSR_GSTAT

	/* Load guest GPRs */
	kvm_restore_guest_gprs a2
	/* Load KVM_ARCH register */
	ld.d	a2, a2, (KVM_ARCH_GGPR + 8 * REG_A2)

	ertn	/* Switch to guest: GSTAT.PGM = 1, ERRCTL.ISERR = 0, TLBRPRMD.ISTLBR = 0 */
.endm

/*
 * Exception entry for general exceptions from guest mode
 * - IRQs are disabled
 * - kernel privilege in root mode
 * - page mode kept unchanged from the previous PRMD in root mode
 * - FIXME: TLB exceptions must not happen here, since the TLB-related
 *   registers (pgd table, vmid, etc.) still hold guest-mode state; this
 *   will be fixed once hardware page walk is enabled
 * - load kvm_vcpu from the reserved CSR KVM_VCPU_KS, save a2 to KVM_TEMP_KS
 */
	.text
	.cfi_sections	.debug_frame
SYM_CODE_START(kvm_exc_entry)
	UNWIND_HINT_UNDEFINED
	csrwr	a2, KVM_TEMP_KS
	csrrd	a2, KVM_VCPU_KS
	addi.d	a2, a2, KVM_VCPU_ARCH

	/* After saving the guest GPRs, every GPR is free to use */
	kvm_save_guest_gprs a2
	/* Save guest a2 */
	csrrd	t0, KVM_TEMP_KS
	st.d	t0, a2, (KVM_ARCH_GGPR + 8 * REG_A2)

	/* a2 holds kvm_vcpu_arch; the other GPRs are free to use */
	csrrd	s1, KVM_VCPU_KS
	ld.d	s0, s1, KVM_VCPU_RUN
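	/* s0 (run) and s1 (vcpu) are callee-saved, so they survive the C call below */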

	csrrd	t0, LOONGARCH_CSR_ESTAT
	st.d	t0, a2, KVM_ARCH_HESTAT
	csrrd	t0, LOONGARCH_CSR_ERA
	st.d	t0, a2, KVM_ARCH_GPC
	csrrd	t0, LOONGARCH_CSR_BADV
	st.d	t0, a2, KVM_ARCH_HBADV
	csrrd	t0, LOONGARCH_CSR_BADI
	st.d	t0, a2, KVM_ARCH_HBADI

	/* Restore host ECFG.VS */
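	/* the guest ran with ECFG.VS = 0, so OR-ing the saved host value back restores it */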
	csrrd	t0, LOONGARCH_CSR_ECFG
	ld.d	t1, a2, KVM_ARCH_HECFG
	or	t0, t0, t1
	csrwr	t0, LOONGARCH_CSR_ECFG

	/* Restore host EENTRY */
	ld.d	t0, a2, KVM_ARCH_HEENTRY
	csrwr	t0, LOONGARCH_CSR_EENTRY

	/* Restore host pgd table */
	ld.d	t0, a2, KVM_ARCH_HPGD
	csrwr	t0, LOONGARCH_CSR_PGDL

	/* Clear the PVM bit so that the next ertn stays in root mode by default */
	ori	t0, zero, CSR_GSTAT_PVM
	csrxchg	zero, t0, LOONGARCH_CSR_GSTAT

	/*
	 * Clear the GTLBC.TGID field
	 * 0: subsequent TLB instructions operate on the root TLB
	 * others: subsequent TLB instructions operate on the guest TLB
	 *         (e.g. GPA-to-HPA updates)
	 */
	csrrd	t0, LOONGARCH_CSR_GTLBC
	bstrins.w	t0, zero, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
	csrwr	t0, LOONGARCH_CSR_GTLBC
	ld.d	tp, a2, KVM_ARCH_HTP
	ld.d	sp, a2, KVM_ARCH_HSP
	/* Restore the per-CPU base register */
	ld.d	u0, a2, KVM_ARCH_HPERCPU
	addi.d	sp, sp, -PT_SIZE
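	/* skip sp past the host GPR frame saved by kvm_enter_guest(), so the C handler's stack frame cannot overwrite it */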

	/* Call the exit handler: a0 = run, a1 = vcpu */
	or	a0, s0, zero
	or	a1, s1, zero
	ld.d	t8, a2, KVM_ARCH_HANDLE_EXIT
	jirl	ra, t8, 0
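	/* a0 holds the handler's return value: > 0 re-enters the guest, <= 0 goes back to the host */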

	or	a2, s1, zero
	addi.d	a2, a2, KVM_VCPU_ARCH

	/* Resume host when ret <= 0 */
	blez	a0, ret_to_host

	/*
	 * Return to guest.
	 * Save the per-CPU base register again; the handler may have
	 * migrated the vCPU to another CPU.
	 */
	st.d	u0, a2, KVM_ARCH_HPERCPU

	/* Save kvm_vcpu to kscratch */
	csrwr	s1, KVM_VCPU_KS
	kvm_switch_to_guest

ret_to_host:
	ld.d	a2, a2, KVM_ARCH_HSP
	addi.d	a2, a2, -PT_SIZE
	kvm_restore_host_gpr a2
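	/* return to kvm_enter_guest()'s caller; a0 still holds the handler's return value */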
	jr	ra

SYM_INNER_LABEL(kvm_exc_entry_end, SYM_L_LOCAL)
SYM_CODE_END(kvm_exc_entry)

/*
 * int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu)
 *
 * @register_param:
 *  a0: kvm_run* run
 *  a1: kvm_vcpu* vcpu
 */
SYM_FUNC_START(kvm_enter_guest)
	/* Reserve a pt_regs frame below sp for the host GPRs */
	addi.d	a2, sp, -PT_SIZE
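	/* sp itself is saved to HSP below; ret_to_host re-derives this frame as HSP - PT_SIZE */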
	/* Save host GPRs */
	kvm_save_host_gpr a2

	addi.d	a2, a1, KVM_VCPU_ARCH
	st.d	sp, a2, KVM_ARCH_HSP
	st.d	tp, a2, KVM_ARCH_HTP
	/* Save per-CPU base register */
	st.d	u0, a2, KVM_ARCH_HPERCPU

	/* Save kvm_vcpu to kscratch */
	csrwr	a1, KVM_VCPU_KS
	kvm_switch_to_guest
SYM_INNER_LABEL(kvm_enter_guest_end, SYM_L_LOCAL)
SYM_FUNC_END(kvm_enter_guest)

SYM_FUNC_START(kvm_save_fpu)
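	/* a0: pointer to the loongarch_fpu context to save */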
	fpu_save_csr	a0 t1
	fpu_save_double	a0 t1
	fpu_save_cc	a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_save_fpu)

SYM_FUNC_START(kvm_restore_fpu)
	fpu_restore_double	a0 t1
	fpu_restore_csr	a0 t1 t2
	fpu_restore_cc	a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_restore_fpu)

#ifdef CONFIG_CPU_HAS_LSX
SYM_FUNC_START(kvm_save_lsx)
	fpu_save_csr	a0 t1
	fpu_save_cc	a0 t1 t2
	lsx_save_data	a0 t1
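	/* the 128-bit LSX registers overlap the FP registers, so lsx_save_data also covers the FP doubles */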
	jr	ra
SYM_FUNC_END(kvm_save_lsx)

SYM_FUNC_START(kvm_restore_lsx)
	lsx_restore_data	a0 t1
	fpu_restore_cc	a0 t1 t2
	fpu_restore_csr	a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_restore_lsx)
#endif

#ifdef CONFIG_CPU_HAS_LASX
SYM_FUNC_START(kvm_save_lasx)
	fpu_save_csr	a0 t1
	fpu_save_cc	a0 t1 t2
	lasx_save_data	a0 t1
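	/* likewise, the 256-bit LASX registers contain the LSX/FP state */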
	jr	ra
SYM_FUNC_END(kvm_save_lasx)

SYM_FUNC_START(kvm_restore_lasx)
	lasx_restore_data	a0 t1
	fpu_restore_cc	a0 t1 t2
	fpu_restore_csr	a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_restore_lasx)
#endif
	.section ".rodata"
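/* Sizes of the routines above, used by the KVM init code when it copies them into the dedicated exception handling area */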
SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)

#ifdef CONFIG_CPU_HAS_LBT
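/*
 * The FPU/SIMD restore paths confuse objtool's stack validation when
 * LBT is enabled, so exempt these functions from the checks.
 */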
STACK_FRAME_NON_STANDARD kvm_restore_fpu
STACK_FRAME_NON_STANDARD kvm_restore_lsx
STACK_FRAME_NON_STANDARD kvm_restore_lasx
#endif
283 | |