/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/asm-compat.h>

#define KVM_MAGIC_PAGE	(-4096)
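
/*
 * The KVM "magic page" (struct kvm_vcpu_arch_shared) is mapped by the
 * guest at the top of its effective address space, so -4096 plus a
 * field offset is a valid signed 16-bit displacement.  Using a base
 * register of 0 makes the displacement absolute, so every load/store
 * below reaches the shared page without burning a register on
 * addressing.
 */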

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif
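
/*
 * The MSR and critical fields in the shared page are 64 bits wide.
 * On 32-bit kernels LL64/STL64 only touch the low word, which in the
 * big-endian layout these users assume sits at offset +4.
 */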
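
/*
 * SCRATCH_SAVE/SCRATCH_RESTORE bracket every template below.  Writing
 * r1 (the stack pointer) into shared->critical tells the host not to
 * inject interrupts right now; writing r2, which never equals r1,
 * reopens the window.  In between, r30/r31 and CR are stashed in the
 * shared page's scratch slots so each template has two registers to
 * work with.
 */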
#define SCRATCH_SAVE \
	/* Enable critical section. We are critical if \
	   shared->critical == r1 */ \
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0); \
	\
	/* Save state */ \
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0); \
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0); \
	mfcr	r31; \
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE \
	/* Restore state */ \
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0); \
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0); \
	mtcr	r30; \
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0); \
	\
	/* Disable critical section. We are only critical while \
	   shared->critical == r1, and r2 is never equal to r1 */ \
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);

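/*
 * Everything between kvm_template_start and kvm_template_end is a set
 * of patch templates.  At guest boot, arch/powerpc/kernel/kvm.c copies
 * a template into the kvm_tmp buffer, rewrites the placeholder at each
 * *_reg_offs slot to use the register the trapped instruction named,
 * drops the original instruction into the *_orig_ins_offs slot (where
 * the template has one) as a trapping fallback, fixes up the "b ." at
 * *_branch_offs to return to the next guest instruction, and finally
 * replaces the privileged instruction itself with a branch into the
 * template.  The *_offs/_len symbols are word offsets and lengths for
 * that patcher.
 */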
.global kvm_template_start
kvm_template_start:

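/*
 * mtmsrd rS, 1 only alters MSR[EE] and MSR[RI], so it can usually be
 * emulated entirely in the guest by folding those two bits into the
 * MSR copy in the magic page.  Only when the write enables MSR_EE
 * while an interrupt is pending do we fall back to the original
 * (trapping) instruction so the host can deliver it.
 */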
.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the register's (MSR_EE|MSR_RI) bits into the MSR */
kvm_emulate_mtmsrd_reg:
	ori	r30, r0, 0
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
	tlbsync

	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:

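/*
 * Word offsets (and total length) of the patch points above, consumed
 * by the patching code in arch/powerpc/kernel/kvm.c.
 */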
.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4


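/*
 * mtmsr (and mtmsrd rS, 0) may change any MSR bit.  Changes confined
 * to the "safe" bits EE and RI can be applied to the magic-page copy;
 * if any other bit differs we must run the real, trapping instruction.
 */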
#define MSR_SAFE_BITS	(MSR_EE | MSR_RI)
#define MSR_CRITICAL_BITS	~MSR_SAFE_BITS

.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0
	xor	r31, r30, r31

	/* Check whether we really need to execute mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest

do_mtmsr:

	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0

	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

	/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0

	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	bne	do_mtmsr

no_mtmsr:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4

#ifdef CONFIG_BOOKE

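/*
 * BookE: wrtee rS copies rS[EE] into MSR[EE] and leaves everything
 * else alone, so it can always be folded into the magic-page MSR; a
 * real wrtee is only needed to pick up a pending interrupt.
 */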
/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
	ori	r30, r0, 0
	rlwimi	r31, r30, 0, MSR_EE

	/*
	 * If MSR[EE] is now set, check for a pending interrupt.
	 * We could skip this if MSR[EE] was already on, but that
	 * should be rare, so don't bother.
	 */
	andi.	r30, r30, MSR_EE

	/* Put MSR into magic page because we don't call wrtee */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	beq	no_wrtee

	/* Check if we have to fetch an interrupt */
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r30, 0
	bne	do_wrtee

no_wrtee:
	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrtee_branch:
	b	.

do_wrtee:
	SCRATCH_RESTORE

	/* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
	wrtee	r0

	b	kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:

.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
	.long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
	.long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
	.long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
	.long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4

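/*
 * wrteei 0 only clears MSR[EE].  Disabling interrupts can never make
 * an injection due, so no fallback to the original instruction (and
 * hence no reg/orig_ins patch slot) is needed.
 */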
.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Remove MSR_EE from old MSR */
	rlwinm	r31, r31, 0, ~MSR_EE

	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrteei_0_branch:
	b	.
kvm_emulate_wrteei_0_end:

.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
	.long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
	.long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4

#endif /* CONFIG_BOOKE */

#ifdef CONFIG_PPC_BOOK3S_32

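/*
 * Book3S-32: mtsrin rS, rB writes a segment register.  While address
 * translation is off we can simply update the SR copy in the magic
 * page; once MSR[IR|DR] is on, the real (trapping) mtsrin must run so
 * the host can keep its shadow MMU state in sync.  The nop at
 * orig_ins below is the slot the patcher fills with the original
 * instruction.
 */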
.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1

	SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
	nop
	b	kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
	/* SR number from rB's top four bits, scaled to a word offset:
	   (rX >> 28) << 2 */
	rlwinm	r30, r0, 6, 26, 29

kvm_emulate_mtsrin_reg2:
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4

#endif /* CONFIG_PPC_BOOK3S_32 */

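/*
 * Runtime buffer that the templates above are copied into and patched
 * in place; the patcher in arch/powerpc/kernel/kvm.c carves chunks
 * out of kvm_tmp as instructions are rewritten.
 */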
	.balign 4
	.global kvm_tmp
kvm_tmp:
	.space	(64 * 1024)

.global kvm_tmp_end
kvm_tmp_end:

.global kvm_template_end
kvm_template_end:

