// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>
#include <linux/mmu_notifier.h>

#include <asm/access-regs.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/fpu.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "pci.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
                           (KVM_MAX_VCPUS + LOCAL_IRQS))

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
        KVM_GENERIC_VM_STATS(),
        STATS_DESC_COUNTER(VM, inject_io),
        STATS_DESC_COUNTER(VM, inject_float_mchk),
        STATS_DESC_COUNTER(VM, inject_pfault_done),
        STATS_DESC_COUNTER(VM, inject_service_signal),
        STATS_DESC_COUNTER(VM, inject_virtio),
        STATS_DESC_COUNTER(VM, aen_forward),
        STATS_DESC_COUNTER(VM, gmap_shadow_reuse),
        STATS_DESC_COUNTER(VM, gmap_shadow_create),
        STATS_DESC_COUNTER(VM, gmap_shadow_r1_entry),
        STATS_DESC_COUNTER(VM, gmap_shadow_r2_entry),
        STATS_DESC_COUNTER(VM, gmap_shadow_r3_entry),
        STATS_DESC_COUNTER(VM, gmap_shadow_sg_entry),
        STATS_DESC_COUNTER(VM, gmap_shadow_pg_entry),
};

const struct kvm_stats_header kvm_vm_stats_header = {
        .name_size = KVM_STATS_NAME_SIZE,
        .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
        .id_offset = sizeof(struct kvm_stats_header),
        .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
        .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
                       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
        KVM_GENERIC_VCPU_STATS(),
        STATS_DESC_COUNTER(VCPU, exit_userspace),
        STATS_DESC_COUNTER(VCPU, exit_null),
        STATS_DESC_COUNTER(VCPU, exit_external_request),
        STATS_DESC_COUNTER(VCPU, exit_io_request),
        STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
        STATS_DESC_COUNTER(VCPU, exit_stop_request),
        STATS_DESC_COUNTER(VCPU, exit_validity),
        STATS_DESC_COUNTER(VCPU, exit_instruction),
        STATS_DESC_COUNTER(VCPU, exit_pei),
        STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
        STATS_DESC_COUNTER(VCPU, instruction_lctl),
        STATS_DESC_COUNTER(VCPU, instruction_lctlg),
        STATS_DESC_COUNTER(VCPU, instruction_stctl),
        STATS_DESC_COUNTER(VCPU, instruction_stctg),
        STATS_DESC_COUNTER(VCPU, exit_program_interruption),
        STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
        STATS_DESC_COUNTER(VCPU, exit_operation_exception),
        STATS_DESC_COUNTER(VCPU, deliver_ckc),
        STATS_DESC_COUNTER(VCPU, deliver_cputm),
        STATS_DESC_COUNTER(VCPU, deliver_external_call),
        STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
        STATS_DESC_COUNTER(VCPU, deliver_service_signal),
        STATS_DESC_COUNTER(VCPU, deliver_virtio),
        STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
        STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
        STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
        STATS_DESC_COUNTER(VCPU, deliver_program),
        STATS_DESC_COUNTER(VCPU, deliver_io),
        STATS_DESC_COUNTER(VCPU, deliver_machine_check),
        STATS_DESC_COUNTER(VCPU, exit_wait_state),
        STATS_DESC_COUNTER(VCPU, inject_ckc),
        STATS_DESC_COUNTER(VCPU, inject_cputm),
        STATS_DESC_COUNTER(VCPU, inject_external_call),
        STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
        STATS_DESC_COUNTER(VCPU, inject_mchk),
        STATS_DESC_COUNTER(VCPU, inject_pfault_init),
        STATS_DESC_COUNTER(VCPU, inject_program),
        STATS_DESC_COUNTER(VCPU, inject_restart),
        STATS_DESC_COUNTER(VCPU, inject_set_prefix),
        STATS_DESC_COUNTER(VCPU, inject_stop_signal),
        STATS_DESC_COUNTER(VCPU, instruction_epsw),
        STATS_DESC_COUNTER(VCPU, instruction_gs),
        STATS_DESC_COUNTER(VCPU, instruction_io_other),
        STATS_DESC_COUNTER(VCPU, instruction_lpsw),
        STATS_DESC_COUNTER(VCPU, instruction_lpswe),
        STATS_DESC_COUNTER(VCPU, instruction_pfmf),
        STATS_DESC_COUNTER(VCPU, instruction_ptff),
        STATS_DESC_COUNTER(VCPU, instruction_sck),
        STATS_DESC_COUNTER(VCPU, instruction_sckpf),
        STATS_DESC_COUNTER(VCPU, instruction_stidp),
        STATS_DESC_COUNTER(VCPU, instruction_spx),
        STATS_DESC_COUNTER(VCPU, instruction_stpx),
        STATS_DESC_COUNTER(VCPU, instruction_stap),
        STATS_DESC_COUNTER(VCPU, instruction_iske),
        STATS_DESC_COUNTER(VCPU, instruction_ri),
        STATS_DESC_COUNTER(VCPU, instruction_rrbe),
        STATS_DESC_COUNTER(VCPU, instruction_sske),
        STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
        STATS_DESC_COUNTER(VCPU, instruction_stsi),
        STATS_DESC_COUNTER(VCPU, instruction_stfl),
        STATS_DESC_COUNTER(VCPU, instruction_tb),
        STATS_DESC_COUNTER(VCPU, instruction_tpi),
        STATS_DESC_COUNTER(VCPU, instruction_tprot),
        STATS_DESC_COUNTER(VCPU, instruction_tsch),
        STATS_DESC_COUNTER(VCPU, instruction_sie),
        STATS_DESC_COUNTER(VCPU, instruction_essa),
        STATS_DESC_COUNTER(VCPU, instruction_sthyi),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
        STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
        STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
        STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
        STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
        STATS_DESC_COUNTER(VCPU, diag_9c_forward),
        STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
        STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
        STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
        STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
        STATS_DESC_COUNTER(VCPU, pfault_sync)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
        .name_size = KVM_STATS_NAME_SIZE,
        .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
        .id_offset = sizeof(struct kvm_stats_header),
        .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
        .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
                       sizeof(kvm_vcpu_stats_desc),
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");

/*
 * allow asynchronous deinit for protected guests; enable by default since
 * the feature is opt-in anyway
 */
static int async_destroy = 1;
module_param(async_destroy, int, 0444);
MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");
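
/*
 * Illustrative module load (assuming the usual "kvm" module name and the
 * parameter defaults above): "modprobe kvm nested=1 hpage=0" enables vSIE
 * support while keeping 1m huge page backing disabled, matching the
 * "if !nested" restriction noted above.
 */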

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
        BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
        BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
        BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
                     sizeof(stfle_fac_list));

        return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
                              unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

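/*
 * The epoch in a SIE control block holds the offset between guest and host
 * TOD; adjust it (and the epoch index, when the multiple-epoch facility is
 * in use) so the guest does not observe a host TOD jump.
 */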
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
        u8 delta_idx = 0;

        /*
         * The TOD jumps by delta, we have to compensate this by adding
         * -delta to the epoch.
         */
        delta = -delta;

        /* sign-extension - we're adding to signed values below */
        if ((s64)delta < 0)
                delta_idx = -1;

        scb->epoch += delta;
        if (scb->ecd & ECD_MEF) {
                scb->epdx += delta_idx;
                if (scb->epoch < delta)
                        scb->epdx += 1;
        }
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
                          void *v)
{
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        unsigned long i;
        unsigned long long *delta = v;

        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
                        if (i == 0) {
                                kvm->arch.epoch = vcpu->arch.sie_block->epoch;
                                kvm->arch.epdx = vcpu->arch.sie_block->epdx;
                        }
                        if (vcpu->arch.cputm_enabled)
                                vcpu->arch.cputm_start += *delta;
                        if (vcpu->arch.vsie_block)
                                kvm_clock_sync_scb(vcpu->arch.vsie_block,
                                                   *delta);
                }
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
        .notifier_call = kvm_clock_sync,
};

static void allow_cpu_feat(unsigned long nr)
{
        set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

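/*
 * Test whether PERFORM LOCKED OPERATION function code @nr is available;
 * setting bit 0x100 in the function code selects the "test bit" query form,
 * which ignores the parameter registers.
 */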
static inline int plo_test_bit(unsigned char nr)
{
        unsigned long function = (unsigned long)nr | 0x100;
        int cc;

        asm volatile(
                "       lgr     0,%[function]\n"
                /* Parameter registers are ignored for "test bit" */
                "       plo     0,0,0,0(0)\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc)
                : [function] "d" (function)
                : "cc", "0");
        return cc == 0;
}

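/*
 * Execute the query function (function code 0 in GR0) of the given
 * RRF-format instruction and store the result at @query.
 */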
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
        asm volatile(
                "       lghi    0,0\n"
                "       lgr     1,%[query]\n"
                /* Parameter registers are ignored */
                "       .insn   rrf,%[opc] << 16,2,4,6,0\n"
                :
                : [query] "d" ((unsigned long)query), [opc] "i" (opcode)
                : "cc", "memory", "0", "1");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

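/*
 * Probe the host's facilities and subfunctions once at init time and record
 * what may later be advertised to and used by guest cpu models.
 */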
static void __init kvm_s390_cpu_feat_init(void)
{
        int i;

        for (i = 0; i < 256; ++i) {
                if (plo_test_bit(i))
                        kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
        }

        if (test_facility(28)) /* TOD-clock steering */
                ptff(kvm_s390_available_subfunc.ptff,
                     sizeof(kvm_s390_available_subfunc.ptff),
                     PTFF_QAF);

        if (test_facility(17)) { /* MSA */
                __cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.kmac);
                __cpacf_query(CPACF_KMC, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.kmc);
                __cpacf_query(CPACF_KM, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.km);
                __cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.kimd);
                __cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.klmd);
        }
        if (test_facility(76)) /* MSA3 */
                __cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.pckmo);
        if (test_facility(77)) { /* MSA4 */
                __cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.kmctr);
                __cpacf_query(CPACF_KMF, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.kmf);
                __cpacf_query(CPACF_KMO, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.kmo);
                __cpacf_query(CPACF_PCC, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.pcc);
        }
        if (test_facility(57)) /* MSA5 */
                __cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.ppno);

        if (test_facility(146)) /* MSA8 */
                __cpacf_query(CPACF_KMA, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.kma);

        if (test_facility(155)) /* MSA9 */
                __cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.kdsa);

        if (test_facility(150)) /* SORTL */
                __insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

        if (test_facility(151)) /* DFLTCC */
                __insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

        if (MACHINE_HAS_ESOP)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
        /*
         * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
         * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
         */
        if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
            !test_facility(3) || !nested)
                return;
        allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
        if (sclp.has_64bscao)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
        if (sclp.has_siif)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
        if (sclp.has_gpere)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
        if (sclp.has_gsls)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
        if (sclp.has_ib)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
        if (sclp.has_cei)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
        if (sclp.has_ibs)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
        if (sclp.has_kss)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
        /*
         * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
         * all skey handling functions read/set the skey from the PGSTE
         * instead of the real storage key.
         *
         * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
         * pages being detected as preserved although they are resident.
         *
         * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
         * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
         *
         * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
         * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
         * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
         *
         * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
         * cannot easily shadow the SCA because of the ipte lock.
         */
}

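/*
 * One-time, VM-independent setup at module load: debug feature areas, cpu
 * feature probing, the FLIC device ops, zPCI interpretation support, the
 * GIB, and the gmap/epoch notifiers.
 */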
static int __init __kvm_s390_init(void)
{
        int rc = -ENOMEM;

        kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
        if (!kvm_s390_dbf)
                return -ENOMEM;

        kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
        if (!kvm_s390_dbf_uv)
                goto err_kvm_uv;

        if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
            debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
                goto err_debug_view;

        kvm_s390_cpu_feat_init();

        /* Register floating interrupt controller interface. */
        rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
        if (rc) {
                pr_err("A FLIC registration call failed with rc=%d\n", rc);
                goto err_flic;
        }

        if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
                rc = kvm_s390_pci_init();
                if (rc) {
                        pr_err("Unable to allocate AIFT for PCI\n");
                        goto err_pci;
                }
        }

        rc = kvm_s390_gib_init(GAL_ISC);
        if (rc)
                goto err_gib;

        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_pte_notifier(&gmap_notifier);
        vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
        gmap_register_pte_notifier(&vsie_gmap_notifier);
        atomic_notifier_chain_register(&s390_epoch_delta_notifier,
                                       &kvm_clock_notifier);

        return 0;

err_gib:
        if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
                kvm_s390_pci_exit();
err_pci:
err_flic:
err_debug_view:
        debug_unregister(kvm_s390_dbf_uv);
err_kvm_uv:
        debug_unregister(kvm_s390_dbf);
        return rc;
}

static void __kvm_s390_exit(void)
{
        gmap_unregister_pte_notifier(&gmap_notifier);
        gmap_unregister_pte_notifier(&vsie_gmap_notifier);
        atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
                                         &kvm_clock_notifier);

        kvm_s390_gib_destroy();
        if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
                kvm_s390_pci_exit();
        debug_unregister(kvm_s390_dbf);
        debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

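/*
 * Report which KVM capabilities this VM supports; for scalar capabilities
 * the returned value carries the limit (e.g. the maximum MEM_OP size or
 * the number of VCPUs).
 */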
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_ASYNC_PF:
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_S390_IRQCHIP:
        case KVM_CAP_VM_ATTRIBUTES:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_IMMEDIATE_EXIT:
        case KVM_CAP_S390_INJECT_IRQ:
        case KVM_CAP_S390_USER_SIGP:
        case KVM_CAP_S390_USER_STSI:
        case KVM_CAP_S390_SKEYS:
        case KVM_CAP_S390_IRQ_STATE:
        case KVM_CAP_S390_USER_INSTR0:
        case KVM_CAP_S390_CMMA_MIGRATION:
        case KVM_CAP_S390_AIS:
        case KVM_CAP_S390_AIS_MIGRATION:
        case KVM_CAP_S390_VCPU_RESETS:
        case KVM_CAP_SET_GUEST_DEBUG:
        case KVM_CAP_S390_DIAG318:
        case KVM_CAP_IRQFD_RESAMPLE:
                r = 1;
                break;
        case KVM_CAP_SET_GUEST_DEBUG2:
                r = KVM_GUESTDBG_VALID_MASK;
                break;
        case KVM_CAP_S390_HPAGE_1M:
                r = 0;
                if (hpage && !kvm_is_ucontrol(kvm))
                        r = 1;
                break;
        case KVM_CAP_S390_MEM_OP:
                r = MEM_OP_MAX_SIZE;
                break;
        case KVM_CAP_S390_MEM_OP_EXTENSION:
                /*
                 * Flag bits indicating which extensions are supported.
                 * If r > 0, the base extension must also be supported/indicated,
                 * in order to maintain backwards compatibility.
                 */
                r = KVM_S390_MEMOP_EXTENSION_CAP_BASE |
                    KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
        case KVM_CAP_MAX_VCPU_ID:
                r = KVM_S390_BSCA_CPU_SLOTS;
                if (!kvm_s390_use_sca_entries())
                        r = KVM_MAX_VCPUS;
                else if (sclp.has_esca && sclp.has_64bscao)
                        r = KVM_S390_ESCA_CPU_SLOTS;
                if (ext == KVM_CAP_NR_VCPUS)
                        r = min_t(unsigned int, num_online_cpus(), r);
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        case KVM_CAP_S390_VECTOR_REGISTERS:
                r = test_facility(129);
                break;
        case KVM_CAP_S390_RI:
                r = test_facility(64);
                break;
        case KVM_CAP_S390_GS:
                r = test_facility(133);
                break;
        case KVM_CAP_S390_BPB:
                r = test_facility(82);
                break;
        case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
                r = async_destroy && is_prot_virt_host();
                break;
        case KVM_CAP_S390_PROTECTED:
                r = is_prot_virt_host();
                break;
        case KVM_CAP_S390_PROTECTED_DUMP: {
                u64 pv_cmds_dump[] = {
                        BIT_UVC_CMD_DUMP_INIT,
                        BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
                        BIT_UVC_CMD_DUMP_CPU,
                        BIT_UVC_CMD_DUMP_COMPLETE,
                };
                int i;

                r = is_prot_virt_host();

                for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
                        if (!test_bit_inv(pv_cmds_dump[i],
                                          (unsigned long *)&uv_info.inst_calls_list)) {
                                r = 0;
                                break;
                        }
                }
                break;
        }
        case KVM_CAP_S390_ZPCI_OP:
                r = kvm_s390_pci_interp_allowed();
                break;
        case KVM_CAP_S390_CPU_TOPOLOGY:
                r = test_facility(11);
                break;
        default:
                r = 0;
        }
        return r;
}

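/*
 * Transfer the dirty state from the gmap segment tables into the memslot's
 * dirty bitmap, one segment (_PAGE_ENTRIES pages) at a time.
 */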
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
        int i;
        gfn_t cur_gfn, last_gfn;
        unsigned long gaddr, vmaddr;
        struct gmap *gmap = kvm->arch.gmap;
        DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

        /* Loop over all guest segments */
        cur_gfn = memslot->base_gfn;
        last_gfn = memslot->base_gfn + memslot->npages;
        for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
                gaddr = gfn_to_gpa(cur_gfn);
                vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
                if (kvm_is_error_hva(vmaddr))
                        continue;

                bitmap_zero(bitmap, _PAGE_ENTRIES);
                gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
                for (i = 0; i < _PAGE_ENTRIES; i++) {
                        if (test_bit(i, bitmap))
                                mark_page_dirty(kvm, cur_gfn + i);
                }

                if (fatal_signal_pending(current))
                        return;
                cond_resched();
        }
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        int r;
        unsigned long n;
        struct kvm_memory_slot *memslot;
        int is_dirty;

        if (kvm_is_ucontrol(kvm))
                return -EINVAL;

        mutex_lock(&kvm->slots_lock);

        r = -EINVAL;
        if (log->slot >= KVM_USER_MEM_SLOTS)
                goto out;

        r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
        if (r)
                goto out;

        /* Clear the dirty log */
        if (is_dirty) {
                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
        }
}

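/*
 * Enable a VM-wide capability. Capabilities that change the cpu model must
 * be enabled before the first VCPU is created, so those cases take
 * kvm->lock and check created_vcpus.
 */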
int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_IRQCHIP:
                VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
                kvm->arch.use_irqchip = 1;
                r = 0;
                break;
        case KVM_CAP_S390_USER_SIGP:
                VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
                kvm->arch.user_sigp = 1;
                r = 0;
                break;
        case KVM_CAP_S390_VECTOR_REGISTERS:
                mutex_lock(&kvm->lock);
                if (kvm->created_vcpus) {
                        r = -EBUSY;
                } else if (cpu_has_vx()) {
                        set_kvm_facility(kvm->arch.model.fac_mask, 129);
                        set_kvm_facility(kvm->arch.model.fac_list, 129);
                        if (test_facility(134)) {
                                set_kvm_facility(kvm->arch.model.fac_mask, 134);
                                set_kvm_facility(kvm->arch.model.fac_list, 134);
                        }
                        if (test_facility(135)) {
                                set_kvm_facility(kvm->arch.model.fac_mask, 135);
                                set_kvm_facility(kvm->arch.model.fac_list, 135);
                        }
                        if (test_facility(148)) {
                                set_kvm_facility(kvm->arch.model.fac_mask, 148);
                                set_kvm_facility(kvm->arch.model.fac_list, 148);
                        }
                        if (test_facility(152)) {
                                set_kvm_facility(kvm->arch.model.fac_mask, 152);
                                set_kvm_facility(kvm->arch.model.fac_list, 152);
                        }
                        if (test_facility(192)) {
                                set_kvm_facility(kvm->arch.model.fac_mask, 192);
                                set_kvm_facility(kvm->arch.model.fac_list, 192);
                        }
                        r = 0;
                } else
                        r = -EINVAL;
                mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
                         r ? "(not available)" : "(success)");
                break;
        case KVM_CAP_S390_RI:
                r = -EINVAL;
                mutex_lock(&kvm->lock);
                if (kvm->created_vcpus) {
                        r = -EBUSY;
                } else if (test_facility(64)) {
                        set_kvm_facility(kvm->arch.model.fac_mask, 64);
                        set_kvm_facility(kvm->arch.model.fac_list, 64);
                        r = 0;
                }
                mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
                         r ? "(not available)" : "(success)");
                break;
        case KVM_CAP_S390_AIS:
                mutex_lock(&kvm->lock);
                if (kvm->created_vcpus) {
                        r = -EBUSY;
                } else {
                        set_kvm_facility(kvm->arch.model.fac_mask, 72);
                        set_kvm_facility(kvm->arch.model.fac_list, 72);
                        r = 0;
                }
                mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "ENABLE: AIS %s",
                         r ? "(not available)" : "(success)");
                break;
        case KVM_CAP_S390_GS:
                r = -EINVAL;
                mutex_lock(&kvm->lock);
                if (kvm->created_vcpus) {
                        r = -EBUSY;
                } else if (test_facility(133)) {
                        set_kvm_facility(kvm->arch.model.fac_mask, 133);
                        set_kvm_facility(kvm->arch.model.fac_list, 133);
                        r = 0;
                }
                mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
                         r ? "(not available)" : "(success)");
                break;
        case KVM_CAP_S390_HPAGE_1M:
                mutex_lock(&kvm->lock);
                if (kvm->created_vcpus)
                        r = -EBUSY;
                else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
                        r = -EINVAL;
                else {
                        r = 0;
                        mmap_write_lock(kvm->mm);
                        kvm->mm->context.allow_gmap_hpage_1m = 1;
                        mmap_write_unlock(kvm->mm);
                        /*
                         * We might have to create fake 4k page
                         * tables. To avoid that the hardware works on
                         * stale PGSTEs, we emulate these instructions.
                         */
                        kvm->arch.use_skf = 0;
                        kvm->arch.use_pfmfi = 0;
                }
                mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
                         r ? "(not available)" : "(success)");
                break;
        case KVM_CAP_S390_USER_STSI:
                VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
                kvm->arch.user_stsi = 1;
                r = 0;
                break;
        case KVM_CAP_S390_USER_INSTR0:
                VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
                kvm->arch.user_instr0 = 1;
                icpt_operexc_on_all_vcpus(kvm);
                r = 0;
                break;
        case KVM_CAP_S390_CPU_TOPOLOGY:
                r = -EINVAL;
                mutex_lock(&kvm->lock);
                if (kvm->created_vcpus) {
                        r = -EBUSY;
                } else if (test_facility(11)) {
                        set_kvm_facility(kvm->arch.model.fac_mask, 11);
                        set_kvm_facility(kvm->arch.model.fac_list, 11);
                        r = 0;
                }
                mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s",
                         r ? "(not available)" : "(success)");
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->attr) {
        case KVM_S390_VM_MEM_LIMIT_SIZE:
                ret = 0;
                VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
                         kvm->arch.mem_limit);
                if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
                        ret = -EFAULT;
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

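/*
 * Handle the KVM_S390_VM_MEM_CTRL attribute group: enable CMMA, reset CMMA
 * state, or resize the guest memory limit (which replaces the gmap).
 */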
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;
        unsigned int idx;

        switch (attr->attr) {
        case KVM_S390_VM_MEM_ENABLE_CMMA:
                ret = -ENXIO;
                if (!sclp.has_cmma)
                        break;

                VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
                mutex_lock(&kvm->lock);
                if (kvm->created_vcpus)
                        ret = -EBUSY;
                else if (kvm->mm->context.allow_gmap_hpage_1m)
                        ret = -EINVAL;
                else {
                        kvm->arch.use_cmma = 1;
                        /* Not compatible with cmma. */
                        kvm->arch.use_pfmfi = 0;
                        ret = 0;
                }
                mutex_unlock(&kvm->lock);
                break;
        case KVM_S390_VM_MEM_CLR_CMMA:
                ret = -ENXIO;
                if (!sclp.has_cmma)
                        break;
                ret = -EINVAL;
                if (!kvm->arch.use_cmma)
                        break;

                VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
                mutex_lock(&kvm->lock);
                idx = srcu_read_lock(&kvm->srcu);
                s390_reset_cmma(kvm->arch.gmap->mm);
                srcu_read_unlock(&kvm->srcu, idx);
                mutex_unlock(&kvm->lock);
                ret = 0;
                break;
        case KVM_S390_VM_MEM_LIMIT_SIZE: {
                unsigned long new_limit;

                if (kvm_is_ucontrol(kvm))
                        return -EINVAL;

                if (get_user(new_limit, (u64 __user *)attr->addr))
                        return -EFAULT;

                if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
                    new_limit > kvm->arch.mem_limit)
                        return -E2BIG;

                if (!new_limit)
                        return -EINVAL;

                /* gmap_create takes last usable address */
                if (new_limit != KVM_S390_NO_MEM_LIMIT)
                        new_limit -= 1;

                ret = -EBUSY;
                mutex_lock(&kvm->lock);
                if (!kvm->created_vcpus) {
                        /* gmap_create will round the limit up */
                        struct gmap *new = gmap_create(current->mm, new_limit);

                        if (!new) {
                                ret = -ENOMEM;
                        } else {
                                gmap_remove(kvm->arch.gmap);
                                new->private = kvm;
                                kvm->arch.gmap = new;
                                ret = 0;
                        }
                }
                mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
                VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
                         (void *) kvm->arch.gmap->asce);
                break;
        }
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        unsigned long i;

        kvm_s390_vcpu_block_all(kvm);

        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_s390_vcpu_crypto_setup(vcpu);
                /* recreate the shadow crycb by leaving the VSIE handler */
                kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
        }

        kvm_s390_vcpu_unblock_all(kvm);
}

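/*
 * Set a crypto attribute: generate or clear the AES/DEA wrapping key masks
 * in the CRYCB, or toggle AP instruction interpretation; afterwards the
 * crypto setup of all VCPUs is rebuilt.
 */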
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
        mutex_lock(&kvm->lock);
        switch (attr->attr) {
        case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
                if (!test_kvm_facility(kvm, 76)) {
                        mutex_unlock(&kvm->lock);
                        return -EINVAL;
                }
                get_random_bytes(
                        kvm->arch.crypto.crycb->aes_wrapping_key_mask,
                        sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
                kvm->arch.crypto.aes_kw = 1;
                VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
                break;
        case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
                if (!test_kvm_facility(kvm, 76)) {
                        mutex_unlock(&kvm->lock);
                        return -EINVAL;
                }
                get_random_bytes(
                        kvm->arch.crypto.crycb->dea_wrapping_key_mask,
                        sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
                kvm->arch.crypto.dea_kw = 1;
                VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
                break;
        case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
                if (!test_kvm_facility(kvm, 76)) {
                        mutex_unlock(&kvm->lock);
                        return -EINVAL;
                }
                kvm->arch.crypto.aes_kw = 0;
                memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
                       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
                VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
                break;
        case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
                if (!test_kvm_facility(kvm, 76)) {
                        mutex_unlock(&kvm->lock);
                        return -EINVAL;
                }
                kvm->arch.crypto.dea_kw = 0;
                memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
                       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
                VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
                break;
        case KVM_S390_VM_CRYPTO_ENABLE_APIE:
                if (!ap_instructions_available()) {
                        mutex_unlock(&kvm->lock);
                        return -EOPNOTSUPP;
                }
                kvm->arch.crypto.apie = 1;
                break;
        case KVM_S390_VM_CRYPTO_DISABLE_APIE:
                if (!ap_instructions_available()) {
                        mutex_unlock(&kvm->lock);
                        return -EOPNOTSUPP;
                }
                kvm->arch.crypto.apie = 0;
                break;
        default:
                mutex_unlock(&kvm->lock);
                return -ENXIO;
        }

        kvm_s390_vcpu_crypto_reset_all(kvm);
        mutex_unlock(&kvm->lock);
        return 0;
}

static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
{
        /* Only set the ECB bits after guest requests zPCI interpretation */
        if (!vcpu->kvm->arch.use_zpci_interp)
                return;

        vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
        vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
}

void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        unsigned long i;

        lockdep_assert_held(&kvm->lock);

        if (!kvm_s390_pci_interp_allowed())
                return;

        /*
         * If host is configured for PCI and the necessary facilities are
         * available, turn on interpretation for the life of this guest
         */
        kvm->arch.use_zpci_interp = 1;

        kvm_s390_vcpu_block_all(kvm);

        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_s390_vcpu_pci_setup(vcpu);
                kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
        }

        kvm_s390_vcpu_unblock_all(kvm);
}

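/* Post the given synchronous request to all VCPUs of the VM. */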
static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
        unsigned long cx;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(cx, vcpu, kvm)
                kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
        struct kvm_memory_slot *ms;
        struct kvm_memslots *slots;
        unsigned long ram_pages = 0;
        int bkt;

        /* migration mode already enabled */
        if (kvm->arch.migration_mode)
                return 0;
        slots = kvm_memslots(kvm);
        if (!slots || kvm_memslots_empty(slots))
                return -EINVAL;

        if (!kvm->arch.use_cmma) {
                kvm->arch.migration_mode = 1;
                return 0;
        }
        /* mark all the pages in active slots as dirty */
        kvm_for_each_memslot(ms, bkt, slots) {
                if (!ms->dirty_bitmap)
                        return -EINVAL;
                /*
                 * The second half of the bitmap is only used on x86,
                 * and would be wasted otherwise, so we put it to good
                 * use here to keep track of the state of the storage
                 * attributes.
                 */
                memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
                ram_pages += ms->npages;
        }
        atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
        kvm->arch.migration_mode = 1;
        kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
        return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
        /* migration mode already disabled */
        if (!kvm->arch.migration_mode)
                return 0;
        kvm->arch.migration_mode = 0;
        if (kvm->arch.use_cmma)
                kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
        return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
                                     struct kvm_device_attr *attr)
{
        int res = -ENXIO;

        mutex_lock(&kvm->slots_lock);
        switch (attr->attr) {
        case KVM_S390_VM_MIGRATION_START:
                res = kvm_s390_vm_start_migration(kvm);
                break;
        case KVM_S390_VM_MIGRATION_STOP:
                res = kvm_s390_vm_stop_migration(kvm);
                break;
        default:
                break;
        }
        mutex_unlock(&kvm->slots_lock);

        return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
                                     struct kvm_device_attr *attr)
{
        u64 mig = kvm->arch.migration_mode;

        if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
                return -ENXIO;

        if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
                return -EFAULT;
        return 0;
}

static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_tod_clock gtod;

        if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
                return -EFAULT;

        if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
                return -EINVAL;
        __kvm_s390_set_tod_clock(kvm, &gtod);

        VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
                 gtod.epoch_idx, gtod.tod);

        return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u8 gtod_high;

        if (copy_from_user(&gtod_high, (void __user *)attr->addr,
                           sizeof(gtod_high)))
                return -EFAULT;

        if (gtod_high != 0)
                return -EINVAL;
        VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

        return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_tod_clock gtod = { 0 };

        if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
                           sizeof(gtod.tod)))
                return -EFAULT;

        __kvm_s390_set_tod_clock(kvm, &gtod);
        VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
        return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        if (attr->flags)
                return -EINVAL;

        mutex_lock(&kvm->lock);
        /*
         * For protected guests, the TOD is managed by the ultravisor, so trying
         * to change it will never bring the expected results.
         */
        if (kvm_s390_pv_is_protected(kvm)) {
                ret = -EOPNOTSUPP;
                goto out_unlock;
        }

        switch (attr->attr) {
        case KVM_S390_VM_TOD_EXT:
                ret = kvm_s390_set_tod_ext(kvm, attr);
                break;
        case KVM_S390_VM_TOD_HIGH:
                ret = kvm_s390_set_tod_high(kvm, attr);
                break;
        case KVM_S390_VM_TOD_LOW:
                ret = kvm_s390_set_tod_low(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

out_unlock:
        mutex_unlock(&kvm->lock);
        return ret;
}

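/*
 * Read the host TOD and convert it to the guest view by applying the VM's
 * epoch (and the epoch index, if the multiple-epoch facility is available).
 */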
static void kvm_s390_get_tod_clock(struct kvm *kvm,
                                   struct kvm_s390_vm_tod_clock *gtod)
{
        union tod_clock clk;

        preempt_disable();

        store_tod_clock_ext(&clk);

        gtod->tod = clk.tod + kvm->arch.epoch;
        gtod->epoch_idx = 0;
        if (test_kvm_facility(kvm, 139)) {
                gtod->epoch_idx = clk.ei + kvm->arch.epdx;
                if (gtod->tod < clk.tod)
                        gtod->epoch_idx += 1;
        }

        preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_tod_clock gtod;

        memset(&gtod, 0, sizeof(gtod));
        kvm_s390_get_tod_clock(kvm, &gtod);
        if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
                return -EFAULT;

        VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
                 gtod.epoch_idx, gtod.tod);
        return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u8 gtod_high = 0;

        if (copy_to_user((void __user *)attr->addr, &gtod_high,
                         sizeof(gtod_high)))
                return -EFAULT;
        VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

        return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u64 gtod;

        gtod = kvm_s390_get_tod_clock_fast(kvm);
        if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
                return -EFAULT;
        VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

        return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        if (attr->flags)
                return -EINVAL;

        switch (attr->attr) {
        case KVM_S390_VM_TOD_EXT:
                ret = kvm_s390_get_tod_ext(kvm, attr);
                break;
        case KVM_S390_VM_TOD_HIGH:
                ret = kvm_s390_get_tod_high(kvm, attr);
                break;
        case KVM_S390_VM_TOD_LOW:
                ret = kvm_s390_get_tod_low(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

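/*
 * Set the guest cpu model: cpuid, IBC value (clamped to the host's
 * supported range) and facility list. Only possible before the first VCPU
 * has been created.
 */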
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_processor *proc;
        u16 lowest_ibc, unblocked_ibc;
        int ret = 0;

        mutex_lock(&kvm->lock);
        if (kvm->created_vcpus) {
                ret = -EBUSY;
                goto out;
        }
        proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
        if (!proc) {
                ret = -ENOMEM;
                goto out;
        }
        if (!copy_from_user(proc, (void __user *)attr->addr,
                            sizeof(*proc))) {
                kvm->arch.model.cpuid = proc->cpuid;
                lowest_ibc = sclp.ibc >> 16 & 0xfff;
                unblocked_ibc = sclp.ibc & 0xfff;
                if (lowest_ibc && proc->ibc) {
                        if (proc->ibc > unblocked_ibc)
                                kvm->arch.model.ibc = unblocked_ibc;
                        else if (proc->ibc < lowest_ibc)
                                kvm->arch.model.ibc = lowest_ibc;
                        else
                                kvm->arch.model.ibc = proc->ibc;
                }
                memcpy(kvm->arch.model.fac_list, proc->fac_list,
                       S390_ARCH_FAC_LIST_SIZE_BYTE);
                VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
                         kvm->arch.model.ibc,
                         kvm->arch.model.cpuid);
                VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
                         kvm->arch.model.fac_list[0],
                         kvm->arch.model.fac_list[1],
                         kvm->arch.model.fac_list[2]);
        } else
                ret = -EFAULT;
        kfree(proc);
out:
        mutex_unlock(&kvm->lock);
        return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
                                       struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_feat data;

        if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
                return -EFAULT;
        if (!bitmap_subset((unsigned long *) data.feat,
                           kvm_s390_available_cpu_feat,
                           KVM_S390_VM_CPU_FEAT_NR_BITS))
                return -EINVAL;

        mutex_lock(&kvm->lock);
        if (kvm->created_vcpus) {
                mutex_unlock(&kvm->lock);
                return -EBUSY;
        }
        bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
        mutex_unlock(&kvm->lock);
        VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
                 data.feat[0],
                 data.feat[1],
                 data.feat[2]);
        return 0;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
                                          struct kvm_device_attr *attr)
{
        mutex_lock(&kvm->lock);
        if (kvm->created_vcpus) {
                mutex_unlock(&kvm->lock);
                return -EBUSY;
        }

        if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
                           sizeof(struct kvm_s390_vm_cpu_subfunc))) {
                mutex_unlock(&kvm->lock);
                return -EFAULT;
        }
        mutex_unlock(&kvm->lock);

        VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
                 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
                 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
        VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
        VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
        VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
        VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
        VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
        VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
        VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
        VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
        VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
        VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
        VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
        VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
        VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
        VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
        VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
                 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
                 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
        VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
                 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
                 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

        return 0;
}

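/*
 * UV features that user space is allowed to select for a guest: currently
 * AP pass-through and AP interrupt interpretation.
 */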
1540 | #define KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK \ |
1541 | ( \ |
1542 | ((struct kvm_s390_vm_cpu_uv_feat){ \ |
1543 | .ap = 1, \ |
1544 | .ap_intr = 1, \ |
1545 | }) \ |
1546 | .feat \ |
1547 | ) |
1548 | |
1549 | static int kvm_s390_set_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) |
1550 | { |
1551 | struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr; |
1552 | unsigned long data, filter; |
1553 | |
1554 | filter = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK; |
1555 | if (get_user(data, &ptr->feat)) |
1556 | return -EFAULT; |
1557 | if (!bitmap_subset(&data, &filter, KVM_S390_VM_CPU_UV_FEAT_NR_BITS)) |
1558 | return -EINVAL; |
1559 | |
1560 | mutex_lock(&kvm->lock); |
1561 | if (kvm->created_vcpus) { |
1562 | mutex_unlock(lock: &kvm->lock); |
1563 | return -EBUSY; |
1564 | } |
1565 | kvm->arch.model.uv_feat_guest.feat = data; |
1566 | mutex_unlock(lock: &kvm->lock); |
1567 | |
1568 | VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx" , data); |
1569 | |
1570 | return 0; |
1571 | } |
1572 | |
1573 | static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) |
1574 | { |
1575 | int ret = -ENXIO; |
1576 | |
1577 | switch (attr->attr) { |
1578 | case KVM_S390_VM_CPU_PROCESSOR: |
1579 | ret = kvm_s390_set_processor(kvm, attr); |
1580 | break; |
1581 | case KVM_S390_VM_CPU_PROCESSOR_FEAT: |
1582 | ret = kvm_s390_set_processor_feat(kvm, attr); |
1583 | break; |
1584 | case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC: |
1585 | ret = kvm_s390_set_processor_subfunc(kvm, attr); |
1586 | break; |
1587 | case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST: |
1588 | ret = kvm_s390_set_uv_feat(kvm, attr); |
1589 | break; |
1590 | } |
1591 | return ret; |
1592 | } |
1593 | |
1594 | static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) |
1595 | { |
1596 | struct kvm_s390_vm_cpu_processor *proc; |
1597 | int ret = 0; |
1598 | |
1599 | proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT); |
1600 | if (!proc) { |
1601 | ret = -ENOMEM; |
1602 | goto out; |
1603 | } |
1604 | proc->cpuid = kvm->arch.model.cpuid; |
1605 | proc->ibc = kvm->arch.model.ibc; |
1606 | memcpy(&proc->fac_list, kvm->arch.model.fac_list, |
1607 | S390_ARCH_FAC_LIST_SIZE_BYTE); |
1608 | VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx" , |
1609 | kvm->arch.model.ibc, |
1610 | kvm->arch.model.cpuid); |
1611 | VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx" , |
1612 | kvm->arch.model.fac_list[0], |
1613 | kvm->arch.model.fac_list[1], |
1614 | kvm->arch.model.fac_list[2]); |
1615 | if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) |
1616 | ret = -EFAULT; |
1617 | kfree(objp: proc); |
1618 | out: |
1619 | return ret; |
1620 | } |
1621 | |
1622 | static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) |
1623 | { |
1624 | struct kvm_s390_vm_cpu_machine *mach; |
1625 | int ret = 0; |
1626 | |
1627 | mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT); |
1628 | if (!mach) { |
1629 | ret = -ENOMEM; |
1630 | goto out; |
1631 | } |
1632 | get_cpu_id((struct cpuid *) &mach->cpuid); |
1633 | mach->ibc = sclp.ibc; |
1634 | memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, |
1635 | S390_ARCH_FAC_LIST_SIZE_BYTE); |
1636 | memcpy((unsigned long *)&mach->fac_list, stfle_fac_list, |
1637 | sizeof(stfle_fac_list)); |
1638 | VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx" , |
1639 | kvm->arch.model.ibc, |
1640 | kvm->arch.model.cpuid); |
1641 | VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx" , |
1642 | mach->fac_mask[0], |
1643 | mach->fac_mask[1], |
1644 | mach->fac_mask[2]); |
1645 | VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx" , |
1646 | mach->fac_list[0], |
1647 | mach->fac_list[1], |
1648 | mach->fac_list[2]); |
1649 | if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) |
1650 | ret = -EFAULT; |
1651 | kfree(objp: mach); |
1652 | out: |
1653 | return ret; |
1654 | } |
1655 | |
1656 | static int kvm_s390_get_processor_feat(struct kvm *kvm, |
1657 | struct kvm_device_attr *attr) |
1658 | { |
1659 | struct kvm_s390_vm_cpu_feat data; |
1660 | |
1661 | bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS); |
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
1668 | return 0; |
1669 | } |
1670 | |
1671 | static int kvm_s390_get_machine_feat(struct kvm *kvm, |
1672 | struct kvm_device_attr *attr) |
1673 | { |
1674 | struct kvm_s390_vm_cpu_feat data; |
1675 | |
1676 | bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS); |
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
1683 | return 0; |
1684 | } |
1685 | |
1686 | static int kvm_s390_get_processor_subfunc(struct kvm *kvm, |
1687 | struct kvm_device_attr *attr) |
1688 | { |
1689 | if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs, |
1690 | sizeof(struct kvm_s390_vm_cpu_subfunc))) |
1691 | return -EFAULT; |
1692 | |
1693 | VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx" , |
1694 | ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], |
1695 | ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], |
1696 | ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], |
1697 | ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); |
1698 | VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx" , |
1699 | ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], |
1700 | ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); |
1701 | VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx" , |
1702 | ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], |
1703 | ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); |
1704 | VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx" , |
1705 | ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], |
1706 | ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); |
1707 | VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx" , |
1708 | ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], |
1709 | ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); |
1710 | VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx" , |
1711 | ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], |
1712 | ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); |
1713 | VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx" , |
1714 | ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], |
1715 | ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); |
1716 | VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx" , |
1717 | ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], |
1718 | ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); |
1719 | VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx" , |
1720 | ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], |
1721 | ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); |
1722 | VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx" , |
1723 | ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], |
1724 | ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); |
1725 | VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx" , |
1726 | ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], |
1727 | ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); |
1728 | VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx" , |
1729 | ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], |
1730 | ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); |
1731 | VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx" , |
1732 | ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], |
1733 | ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); |
1734 | VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx" , |
1735 | ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], |
1736 | ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); |
1737 | VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx" , |
1738 | ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], |
1739 | ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); |
1740 | VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx" , |
1741 | ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], |
1742 | ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], |
1743 | ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], |
1744 | ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); |
1745 | VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx" , |
1746 | ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], |
1747 | ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], |
1748 | ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], |
1749 | ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); |
1750 | |
1751 | return 0; |
1752 | } |
1753 | |
1754 | static int kvm_s390_get_machine_subfunc(struct kvm *kvm, |
1755 | struct kvm_device_attr *attr) |
1756 | { |
1757 | if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc, |
1758 | sizeof(struct kvm_s390_vm_cpu_subfunc))) |
1759 | return -EFAULT; |
1760 | |
1761 | VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx" , |
1762 | ((unsigned long *) &kvm_s390_available_subfunc.plo)[0], |
1763 | ((unsigned long *) &kvm_s390_available_subfunc.plo)[1], |
1764 | ((unsigned long *) &kvm_s390_available_subfunc.plo)[2], |
1765 | ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]); |
1766 | VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx" , |
1767 | ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0], |
1768 | ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]); |
1769 | VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx" , |
1770 | ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0], |
1771 | ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]); |
1772 | VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx" , |
1773 | ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0], |
1774 | ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]); |
1775 | VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx" , |
1776 | ((unsigned long *) &kvm_s390_available_subfunc.km)[0], |
1777 | ((unsigned long *) &kvm_s390_available_subfunc.km)[1]); |
1778 | VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx" , |
1779 | ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0], |
1780 | ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]); |
1781 | VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx" , |
1782 | ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0], |
1783 | ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]); |
1784 | VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx" , |
1785 | ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0], |
1786 | ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]); |
1787 | VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx" , |
1788 | ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0], |
1789 | ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]); |
1790 | VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx" , |
1791 | ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0], |
1792 | ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]); |
1793 | VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx" , |
1794 | ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0], |
1795 | ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]); |
1796 | VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx" , |
1797 | ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0], |
1798 | ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]); |
1799 | VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx" , |
1800 | ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0], |
1801 | ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]); |
1802 | VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx" , |
1803 | ((unsigned long *) &kvm_s390_available_subfunc.kma)[0], |
1804 | ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]); |
1805 | VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx" , |
1806 | ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0], |
1807 | ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]); |
1808 | VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx" , |
1809 | ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0], |
1810 | ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1], |
1811 | ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2], |
1812 | ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]); |
1813 | VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx" , |
1814 | ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0], |
1815 | ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1], |
1816 | ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2], |
1817 | ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]); |
1818 | |
1819 | return 0; |
1820 | } |
1821 | |
1822 | static int kvm_s390_get_processor_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) |
1823 | { |
1824 | struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr; |
1825 | unsigned long feat = kvm->arch.model.uv_feat_guest.feat; |
1826 | |
1827 | if (put_user(feat, &dst->feat)) |
1828 | return -EFAULT; |
1829 | VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx" , feat); |
1830 | |
1831 | return 0; |
1832 | } |
1833 | |
1834 | static int kvm_s390_get_machine_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) |
1835 | { |
1836 | struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr; |
1837 | unsigned long feat; |
1838 | |
1839 | BUILD_BUG_ON(sizeof(*dst) != sizeof(uv_info.uv_feature_indications)); |
1840 | |
1841 | feat = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK; |
1842 | if (put_user(feat, &dst->feat)) |
1843 | return -EFAULT; |
1844 | VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx" , feat); |
1845 | |
1846 | return 0; |
1847 | } |
1848 | |
1849 | static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) |
1850 | { |
1851 | int ret = -ENXIO; |
1852 | |
1853 | switch (attr->attr) { |
1854 | case KVM_S390_VM_CPU_PROCESSOR: |
1855 | ret = kvm_s390_get_processor(kvm, attr); |
1856 | break; |
1857 | case KVM_S390_VM_CPU_MACHINE: |
1858 | ret = kvm_s390_get_machine(kvm, attr); |
1859 | break; |
1860 | case KVM_S390_VM_CPU_PROCESSOR_FEAT: |
1861 | ret = kvm_s390_get_processor_feat(kvm, attr); |
1862 | break; |
1863 | case KVM_S390_VM_CPU_MACHINE_FEAT: |
1864 | ret = kvm_s390_get_machine_feat(kvm, attr); |
1865 | break; |
1866 | case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC: |
1867 | ret = kvm_s390_get_processor_subfunc(kvm, attr); |
1868 | break; |
1869 | case KVM_S390_VM_CPU_MACHINE_SUBFUNC: |
1870 | ret = kvm_s390_get_machine_subfunc(kvm, attr); |
1871 | break; |
1872 | case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST: |
1873 | ret = kvm_s390_get_processor_uv_feat(kvm, attr); |
1874 | break; |
1875 | case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST: |
1876 | ret = kvm_s390_get_machine_uv_feat(kvm, attr); |
1877 | break; |
1878 | } |
1879 | return ret; |
1880 | } |
1881 | |
1882 | /** |
1883 | * kvm_s390_update_topology_change_report - update CPU topology change report |
1884 | * @kvm: guest KVM description |
1885 | * @val: set or clear the MTCR bit |
1886 | * |
1887 | * Updates the Multiprocessor Topology-Change-Report bit to signal |
1888 | * the guest with a topology change. |
1889 | * This is only relevant if the topology facility is present. |
1890 | * |
 * The SCA version, bsca or esca, doesn't matter as the offset is the same.
1892 | */ |
1893 | static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val) |
1894 | { |
1895 | union sca_utility new, old; |
1896 | struct bsca_block *sca; |
1897 | |
1898 | read_lock(&kvm->arch.sca_lock); |
1899 | sca = kvm->arch.sca; |
1900 | do { |
1901 | old = READ_ONCE(sca->utility); |
1902 | new = old; |
1903 | new.mtcr = val; |
1904 | } while (cmpxchg(&sca->utility.val, old.val, new.val) != old.val); |
1905 | read_unlock(&kvm->arch.sca_lock); |
1906 | } |
1907 | |
1908 | static int kvm_s390_set_topo_change_indication(struct kvm *kvm, |
1909 | struct kvm_device_attr *attr) |
1910 | { |
	if (!test_kvm_facility(kvm, 11))
		return -ENXIO;

	kvm_s390_update_topology_change_report(kvm, !!attr->attr);
1915 | return 0; |
1916 | } |
1917 | |
1918 | static int kvm_s390_get_topo_change_indication(struct kvm *kvm, |
1919 | struct kvm_device_attr *attr) |
1920 | { |
1921 | u8 topo; |
1922 | |
	if (!test_kvm_facility(kvm, 11))
1924 | return -ENXIO; |
1925 | |
1926 | read_lock(&kvm->arch.sca_lock); |
1927 | topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr; |
1928 | read_unlock(&kvm->arch.sca_lock); |
1929 | |
1930 | return put_user(topo, (u8 __user *)attr->addr); |
1931 | } |
1932 | |
1933 | static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) |
1934 | { |
1935 | int ret; |
1936 | |
1937 | switch (attr->group) { |
1938 | case KVM_S390_VM_MEM_CTRL: |
1939 | ret = kvm_s390_set_mem_control(kvm, attr); |
1940 | break; |
1941 | case KVM_S390_VM_TOD: |
1942 | ret = kvm_s390_set_tod(kvm, attr); |
1943 | break; |
1944 | case KVM_S390_VM_CPU_MODEL: |
1945 | ret = kvm_s390_set_cpu_model(kvm, attr); |
1946 | break; |
1947 | case KVM_S390_VM_CRYPTO: |
1948 | ret = kvm_s390_vm_set_crypto(kvm, attr); |
1949 | break; |
1950 | case KVM_S390_VM_MIGRATION: |
1951 | ret = kvm_s390_vm_set_migration(kvm, attr); |
1952 | break; |
1953 | case KVM_S390_VM_CPU_TOPOLOGY: |
1954 | ret = kvm_s390_set_topo_change_indication(kvm, attr); |
1955 | break; |
1956 | default: |
1957 | ret = -ENXIO; |
1958 | break; |
1959 | } |
1960 | |
1961 | return ret; |
1962 | } |
1963 | |
1964 | static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) |
1965 | { |
1966 | int ret; |
1967 | |
1968 | switch (attr->group) { |
1969 | case KVM_S390_VM_MEM_CTRL: |
1970 | ret = kvm_s390_get_mem_control(kvm, attr); |
1971 | break; |
1972 | case KVM_S390_VM_TOD: |
1973 | ret = kvm_s390_get_tod(kvm, attr); |
1974 | break; |
1975 | case KVM_S390_VM_CPU_MODEL: |
1976 | ret = kvm_s390_get_cpu_model(kvm, attr); |
1977 | break; |
1978 | case KVM_S390_VM_MIGRATION: |
1979 | ret = kvm_s390_vm_get_migration(kvm, attr); |
1980 | break; |
1981 | case KVM_S390_VM_CPU_TOPOLOGY: |
1982 | ret = kvm_s390_get_topo_change_indication(kvm, attr); |
1983 | break; |
1984 | default: |
1985 | ret = -ENXIO; |
1986 | break; |
1987 | } |
1988 | |
1989 | return ret; |
1990 | } |
1991 | |
1992 | static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) |
1993 | { |
1994 | int ret; |
1995 | |
1996 | switch (attr->group) { |
1997 | case KVM_S390_VM_MEM_CTRL: |
1998 | switch (attr->attr) { |
1999 | case KVM_S390_VM_MEM_ENABLE_CMMA: |
2000 | case KVM_S390_VM_MEM_CLR_CMMA: |
2001 | ret = sclp.has_cmma ? 0 : -ENXIO; |
2002 | break; |
2003 | case KVM_S390_VM_MEM_LIMIT_SIZE: |
2004 | ret = 0; |
2005 | break; |
2006 | default: |
2007 | ret = -ENXIO; |
2008 | break; |
2009 | } |
2010 | break; |
2011 | case KVM_S390_VM_TOD: |
2012 | switch (attr->attr) { |
2013 | case KVM_S390_VM_TOD_LOW: |
2014 | case KVM_S390_VM_TOD_HIGH: |
2015 | ret = 0; |
2016 | break; |
2017 | default: |
2018 | ret = -ENXIO; |
2019 | break; |
2020 | } |
2021 | break; |
2022 | case KVM_S390_VM_CPU_MODEL: |
2023 | switch (attr->attr) { |
2024 | case KVM_S390_VM_CPU_PROCESSOR: |
2025 | case KVM_S390_VM_CPU_MACHINE: |
2026 | case KVM_S390_VM_CPU_PROCESSOR_FEAT: |
2027 | case KVM_S390_VM_CPU_MACHINE_FEAT: |
2028 | case KVM_S390_VM_CPU_MACHINE_SUBFUNC: |
2029 | case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC: |
2030 | case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST: |
2031 | case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST: |
2032 | ret = 0; |
2033 | break; |
2034 | default: |
2035 | ret = -ENXIO; |
2036 | break; |
2037 | } |
2038 | break; |
2039 | case KVM_S390_VM_CRYPTO: |
2040 | switch (attr->attr) { |
2041 | case KVM_S390_VM_CRYPTO_ENABLE_AES_KW: |
2042 | case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW: |
2043 | case KVM_S390_VM_CRYPTO_DISABLE_AES_KW: |
2044 | case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: |
2045 | ret = 0; |
2046 | break; |
2047 | case KVM_S390_VM_CRYPTO_ENABLE_APIE: |
2048 | case KVM_S390_VM_CRYPTO_DISABLE_APIE: |
2049 | ret = ap_instructions_available() ? 0 : -ENXIO; |
2050 | break; |
2051 | default: |
2052 | ret = -ENXIO; |
2053 | break; |
2054 | } |
2055 | break; |
2056 | case KVM_S390_VM_MIGRATION: |
2057 | ret = 0; |
2058 | break; |
2059 | case KVM_S390_VM_CPU_TOPOLOGY: |
		ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;
2061 | break; |
2062 | default: |
2063 | ret = -ENXIO; |
2064 | break; |
2065 | } |
2066 | |
2067 | return ret; |
2068 | } |
2069 | |
2070 | static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) |
2071 | { |
2072 | uint8_t *keys; |
2073 | uint64_t hva; |
2074 | int srcu_idx, i, r = 0; |
2075 | |
2076 | if (args->flags != 0) |
2077 | return -EINVAL; |
2078 | |
2079 | /* Is this guest using storage keys? */ |
2080 | if (!mm_uses_skeys(current->mm)) |
2081 | return KVM_S390_GET_SKEYS_NONE; |
2082 | |
2083 | /* Enforce sane limit on memory allocation */ |
2084 | if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) |
2085 | return -EINVAL; |
2086 | |
	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
2096 | r = -EFAULT; |
2097 | break; |
2098 | } |
2099 | |
2100 | r = get_guest_storage_key(current->mm, hva, &keys[i]); |
2101 | if (r) |
2102 | break; |
2103 | } |
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
2115 | return r; |
2116 | } |
2117 | |
2118 | static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) |
2119 | { |
2120 | uint8_t *keys; |
2121 | uint64_t hva; |
2122 | int srcu_idx, i, r = 0; |
2123 | bool unlocked; |
2124 | |
2125 | if (args->flags != 0) |
2126 | return -EINVAL; |
2127 | |
2128 | /* Enforce sane limit on memory allocation */ |
2129 | if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) |
2130 | return -EINVAL; |
2131 | |
	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
2138 | if (r) { |
2139 | r = -EFAULT; |
2140 | goto out; |
2141 | } |
2142 | |
2143 | /* Enable storage key handling for the guest */ |
2144 | r = s390_enable_skey(); |
2145 | if (r) |
2146 | goto out; |
2147 | |
2148 | i = 0; |
2149 | mmap_read_lock(current->mm); |
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
2155 | r = -EFAULT; |
2156 | break; |
2157 | } |
2158 | |
2159 | /* Lowest order bit is reserved */ |
2160 | if (keys[i] & 0x01) { |
2161 | r = -EINVAL; |
2162 | break; |
2163 | } |
2164 | |
2165 | r = set_guest_storage_key(current->mm, hva, keys[i], 0); |
2166 | if (r) { |
			r = fixup_user_fault(current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
2169 | if (r) |
2170 | break; |
2171 | } |
2172 | if (!r) |
2173 | i++; |
2174 | } |
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);
out:
	kvfree(keys);
2179 | return r; |
2180 | } |
2181 | |
2182 | /* |
2183 | * Base address and length must be sent at the start of each block, therefore |
2184 | * it's cheaper to send some clean data, as long as it's less than the size of |
2185 | * two longs. |
2186 | */ |
2187 | #define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *)) |
2188 | /* for consistency */ |
2189 | #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX) |
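
/*
 * Worked example: with 8-byte pointers KVM_S390_MAX_BIT_DISTANCE is 16,
 * and starting a new block costs 16 bytes of header (an 8-byte base
 * address plus an 8-byte length). Streaming up to 16 clean one-byte
 * page attributes is therefore never more expensive than opening a
 * fresh block.
 */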
2190 | |
2191 | static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, |
2192 | u8 *res, unsigned long bufsize) |
2193 | { |
2194 | unsigned long pgstev, hva, cur_gfn = args->start_gfn; |
2195 | |
2196 | args->count = 0; |
2197 | while (args->count < bufsize) { |
		hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * We return an error if the first value was invalid, but we
		 * return successfully if at least one value was copied.
		 */
		if (kvm_is_error_hva(hva))
2204 | return args->count ? 0 : -EFAULT; |
2205 | if (get_pgste(kvm->mm, hva, &pgstev) < 0) |
2206 | pgstev = 0; |
2207 | res[args->count++] = (pgstev >> 24) & 0x43; |
2208 | cur_gfn++; |
2209 | } |
2210 | |
2211 | return 0; |
2212 | } |
2213 | |
2214 | static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots, |
2215 | gfn_t gfn) |
2216 | { |
	return ____gfn_to_memslot(slots, gfn, true);
2218 | } |
2219 | |
2220 | static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots, |
2221 | unsigned long cur_gfn) |
2222 | { |
	struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
2224 | unsigned long ofs = cur_gfn - ms->base_gfn; |
2225 | struct rb_node *mnode = &ms->gfn_node[slots->node_idx]; |
2226 | |
2227 | if (ms->base_gfn + ms->npages <= cur_gfn) { |
2228 | mnode = rb_next(mnode); |
2229 | /* If we are above the highest slot, wrap around */ |
2230 | if (!mnode) |
2231 | mnode = rb_first(&slots->gfn_tree); |
2232 | |
2233 | ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); |
2234 | ofs = 0; |
2235 | } |
2236 | |
2237 | if (cur_gfn < ms->base_gfn) |
2238 | ofs = 0; |
2239 | |
	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
		ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
2244 | } |
2245 | return ms->base_gfn + ofs; |
2246 | } |
2247 | |
2248 | static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, |
2249 | u8 *res, unsigned long bufsize) |
2250 | { |
2251 | unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev; |
2252 | struct kvm_memslots *slots = kvm_memslots(kvm); |
2253 | struct kvm_memory_slot *ms; |
2254 | |
2255 | if (unlikely(kvm_memslots_empty(slots))) |
2256 | return 0; |
2257 | |
	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = kvm_s390_get_gfn_end(slots);

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
2274 | if (get_pgste(kvm->mm, hva, &pgstev) < 0) |
2275 | pgstev = 0; |
2276 | /* Save the value */ |
2277 | res[args->count++] = (pgstev >> 24) & 0x43; |
2278 | /* If the next bit is too far away, stop. */ |
2279 | if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE) |
2280 | return 0; |
2281 | /* If we reached the previous "next", find the next one */ |
2282 | if (cur_gfn == next_gfn) |
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
2292 | if (!ms) |
2293 | return 0; |
2294 | } |
2295 | } |
2296 | return 0; |
2297 | } |
2298 | |
2299 | /* |
2300 | * This function searches for the next page with dirty CMMA attributes, and |
2301 | * saves the attributes in the buffer up to either the end of the buffer or |
2302 | * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found; |
2303 | * no trailing clean bytes are saved. |
2304 | * In case no dirty bits were found, or if CMMA was not enabled or used, the |
2305 | * output buffer will indicate 0 as length. |
2306 | */ |
2307 | static int kvm_s390_get_cmma_bits(struct kvm *kvm, |
2308 | struct kvm_s390_cmma_log *args) |
2309 | { |
2310 | unsigned long bufsize; |
2311 | int srcu_idx, peek, ret; |
2312 | u8 *values; |
2313 | |
2314 | if (!kvm->arch.use_cmma) |
2315 | return -ENXIO; |
2316 | /* Invalid/unsupported flags were specified */ |
2317 | if (args->flags & ~KVM_S390_CMMA_PEEK) |
2318 | return -EINVAL; |
2319 | /* Migration mode query, and we are not doing a migration */ |
2320 | peek = !!(args->flags & KVM_S390_CMMA_PEEK); |
2321 | if (!peek && !kvm->arch.migration_mode) |
2322 | return -EINVAL; |
2323 | /* CMMA is disabled or was not used, or the buffer has length zero */ |
2324 | bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX); |
2325 | if (!bufsize || !kvm->mm->context.uses_cmm) { |
2326 | memset(args, 0, sizeof(*args)); |
2327 | return 0; |
2328 | } |
2329 | /* We are not peeking, and there are no dirty pages */ |
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	values = vmalloc(bufsize);
2336 | if (!values) |
2337 | return -ENOMEM; |
2338 | |
	mmap_read_lock(kvm->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	if (peek)
		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
	else
		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(kvm->mm);

	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
2357 | return ret; |
2358 | } |
2359 | |
2360 | /* |
2361 | * This function sets the CMMA attributes for the given pages. If the input |
2362 | * buffer has zero length, no action is taken, otherwise the attributes are |
2363 | * set and the mm->context.uses_cmm flag is set. |
2364 | */ |
2365 | static int kvm_s390_set_cmma_bits(struct kvm *kvm, |
2366 | const struct kvm_s390_cmma_log *args) |
2367 | { |
2368 | unsigned long hva, mask, pgstev, i; |
2369 | uint8_t *bits; |
2370 | int srcu_idx, r = 0; |
2371 | |
2372 | mask = args->mask; |
2373 | |
2374 | if (!kvm->arch.use_cmma) |
2375 | return -ENXIO; |
2376 | /* invalid/unsupported flags */ |
2377 | if (args->flags != 0) |
2378 | return -EINVAL; |
2379 | /* Enforce sane limit on memory allocation */ |
2380 | if (args->count > KVM_S390_CMMA_SIZE_MAX) |
2381 | return -EINVAL; |
2382 | /* Nothing to do */ |
2383 | if (args->count == 0) |
2384 | return 0; |
2385 | |
2386 | bits = vmalloc(array_size(sizeof(*bits), args->count)); |
2387 | if (!bits) |
2388 | return -ENOMEM; |
2389 | |
	r = copy_from_user(bits, (void __user *)args->values, args->count);
2391 | if (r) { |
2392 | r = -EFAULT; |
2393 | goto out; |
2394 | } |
2395 | |
	mmap_read_lock(kvm->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
2401 | r = -EFAULT; |
2402 | break; |
2403 | } |
2404 | |
2405 | pgstev = bits[i]; |
2406 | pgstev = pgstev << 24; |
2407 | mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT; |
2408 | set_pgste_bits(kvm->mm, hva, mask, pgstev); |
2409 | } |
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(kvm->mm);

	if (!kvm->mm->context.uses_cmm) {
		mmap_write_lock(kvm->mm);
		kvm->mm->context.uses_cmm = 1;
		mmap_write_unlock(kvm->mm);
	}
out:
	vfree(bits);
2420 | return r; |
2421 | } |
2422 | |
2423 | /** |
 * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
 * non-protected.
 * @kvm: the VM whose protected vCPUs are to be converted
 * @rc: return value for the RC field of the UVC (in case of error)
 * @rrc: return value for the RRC field of the UVC (in case of error)
 *
 * Does not stop in case of error, tries to convert as many
 * CPUs as possible. In case of error, the RC and RRC of the first error are
 * returned.
2433 | * |
2434 | * Return: 0 in case of success, otherwise -EIO |
2435 | */ |
2436 | int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc) |
2437 | { |
2438 | struct kvm_vcpu *vcpu; |
2439 | unsigned long i; |
2440 | u16 _rc, _rrc; |
2441 | int ret = 0; |
2442 | |
2443 | /* |
2444 | * We ignore failures and try to destroy as many CPUs as possible. |
2445 | * At the same time we must not free the assigned resources when |
	 * this fails, as the ultravisor still has access to that memory.
2447 | * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak |
2448 | * behind. |
2449 | * We want to return the first failure rc and rrc, though. |
2450 | */ |
2451 | kvm_for_each_vcpu(i, vcpu, kvm) { |
2452 | mutex_lock(&vcpu->mutex); |
		if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
			*rc = _rc;
			*rrc = _rrc;
			ret = -EIO;
		}
		mutex_unlock(&vcpu->mutex);
2459 | } |
2460 | /* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */ |
2461 | if (use_gisa) |
2462 | kvm_s390_gisa_enable(kvm); |
2463 | return ret; |
2464 | } |
2465 | |
2466 | /** |
2467 | * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM |
2468 | * to protected. |
2469 | * @kvm: the VM whose protected vCPUs are to be converted |
2470 | * @rc: return value for the RC field of the UVC (in case of error) |
2471 | * @rrc: return value for the RRC field of the UVC (in case of error) |
2472 | * |
2473 | * Tries to undo the conversion in case of error. |
2474 | * |
2475 | * Return: 0 in case of success, otherwise -EIO |
2476 | */ |
2477 | static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc) |
2478 | { |
2479 | unsigned long i; |
2480 | int r = 0; |
2481 | u16 dummy; |
2482 | |
2483 | struct kvm_vcpu *vcpu; |
2484 | |
2485 | /* Disable the GISA if the ultravisor does not support AIV. */ |
2486 | if (!uv_has_feature(BIT_UV_FEAT_AIV)) |
2487 | kvm_s390_gisa_disable(kvm); |
2488 | |
2489 | kvm_for_each_vcpu(i, vcpu, kvm) { |
2490 | mutex_lock(&vcpu->mutex); |
2491 | r = kvm_s390_pv_create_cpu(vcpu, rc, rrc); |
		mutex_unlock(&vcpu->mutex);
		if (r)
			break;
	}
	if (r)
		kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
2498 | return r; |
2499 | } |
2500 | |
2501 | /* |
2502 | * Here we provide user space with a direct interface to query UV |
2503 | * related data like UV maxima and available features as well as |
2504 | * feature specific data. |
2505 | * |
2506 | * To facilitate future extension of the data structures we'll try to |
2507 | * write data up to the maximum requested length. |
2508 | */ |
2509 | static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info) |
2510 | { |
2511 | ssize_t len_min; |
2512 | |
2513 | switch (info->header.id) { |
2514 | case KVM_PV_INFO_VM: { |
2515 | len_min = sizeof(info->header) + sizeof(info->vm); |
2516 | |
2517 | if (info->header.len_max < len_min) |
2518 | return -EINVAL; |
2519 | |
2520 | memcpy(info->vm.inst_calls_list, |
2521 | uv_info.inst_calls_list, |
2522 | sizeof(uv_info.inst_calls_list)); |
2523 | |
		/* It's the max cpuid, not max cpus, so it's off by one */
2525 | info->vm.max_cpus = uv_info.max_guest_cpu_id + 1; |
2526 | info->vm.max_guests = uv_info.max_num_sec_conf; |
2527 | info->vm.max_guest_addr = uv_info.max_sec_stor_addr; |
2528 | info->vm.feature_indication = uv_info.uv_feature_indications; |
2529 | |
2530 | return len_min; |
2531 | } |
2532 | case KVM_PV_INFO_DUMP: { |
2533 | len_min = sizeof(info->header) + sizeof(info->dump); |
2534 | |
2535 | if (info->header.len_max < len_min) |
2536 | return -EINVAL; |
2537 | |
2538 | info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len; |
2539 | info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len; |
2540 | info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len; |
2541 | return len_min; |
2542 | } |
2543 | default: |
2544 | return -EINVAL; |
2545 | } |
2546 | } |
2547 | |
2548 | static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd, |
2549 | struct kvm_s390_pv_dmp dmp) |
2550 | { |
2551 | int r = -EINVAL; |
2552 | void __user *result_buff = (void __user *)dmp.buff_addr; |
2553 | |
2554 | switch (dmp.subcmd) { |
2555 | case KVM_PV_DUMP_INIT: { |
2556 | if (kvm->arch.pv.dumping) |
2557 | break; |
2558 | |
2559 | /* |
2560 | * Block SIE entry as concurrent dump UVCs could lead |
2561 | * to validities. |
2562 | */ |
2563 | kvm_s390_vcpu_block_all(kvm); |
2564 | |
2565 | r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), |
2566 | UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc); |
2567 | KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x" , |
2568 | cmd->rc, cmd->rrc); |
2569 | if (!r) { |
2570 | kvm->arch.pv.dumping = true; |
2571 | } else { |
2572 | kvm_s390_vcpu_unblock_all(kvm); |
2573 | r = -EINVAL; |
2574 | } |
2575 | break; |
2576 | } |
2577 | case KVM_PV_DUMP_CONFIG_STOR_STATE: { |
2578 | if (!kvm->arch.pv.dumping) |
2579 | break; |
2580 | |
2581 | /* |
2582 | * gaddr is an output parameter since we might stop |
2583 | * early. As dmp will be copied back in our caller, we |
2584 | * don't need to do it ourselves. |
2585 | */ |
		r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len,
						&cmd->rc, &cmd->rrc);
2588 | break; |
2589 | } |
2590 | case KVM_PV_DUMP_COMPLETE: { |
2591 | if (!kvm->arch.pv.dumping) |
2592 | break; |
2593 | |
2594 | r = -EINVAL; |
2595 | if (dmp.buff_len < uv_info.conf_dump_finalize_len) |
2596 | break; |
2597 | |
		r = kvm_s390_pv_dump_complete(kvm, result_buff,
					      &cmd->rc, &cmd->rrc);
2600 | break; |
2601 | } |
2602 | default: |
2603 | r = -ENOTTY; |
2604 | break; |
2605 | } |
2606 | |
2607 | return r; |
2608 | } |
2609 | |
2610 | static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd) |
2611 | { |
2612 | const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM); |
2613 | void __user *argp = (void __user *)cmd->data; |
2614 | int r = 0; |
2615 | u16 dummy; |
2616 | |
2617 | if (need_lock) |
2618 | mutex_lock(&kvm->lock); |
2619 | |
2620 | switch (cmd->cmd) { |
2621 | case KVM_PV_ENABLE: { |
2622 | r = -EINVAL; |
2623 | if (kvm_s390_pv_is_protected(kvm)) |
2624 | break; |
2625 | |
2626 | /* |
2627 | * FMT 4 SIE needs esca. As we never switch back to bsca from |
2628 | * esca, we need no cleanup in the error cases below |
2629 | */ |
2630 | r = sca_switch_to_extended(kvm); |
2631 | if (r) |
2632 | break; |
2633 | |
2634 | mmap_write_lock(current->mm); |
2635 | r = gmap_mark_unmergeable(); |
2636 | mmap_write_unlock(current->mm); |
2637 | if (r) |
2638 | break; |
2639 | |
		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			break;

		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
2647 | |
2648 | /* we need to block service interrupts from now on */ |
2649 | set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); |
2650 | break; |
2651 | } |
2652 | case KVM_PV_ASYNC_CLEANUP_PREPARE: |
2653 | r = -EINVAL; |
2654 | if (!kvm_s390_pv_is_protected(kvm) || !async_destroy) |
2655 | break; |
2656 | |
		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
		/*
		 * If a CPU could not be destroyed, destroy VM will also fail.
		 * There is no point in trying to destroy it. Instead return
		 * the rc and rrc from the first CPU that failed destroying.
		 */
		if (r)
			break;
		r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc);
2666 | |
2667 | /* no need to block service interrupts any more */ |
2668 | clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); |
2669 | break; |
2670 | case KVM_PV_ASYNC_CLEANUP_PERFORM: |
2671 | r = -EINVAL; |
2672 | if (!async_destroy) |
2673 | break; |
2674 | /* kvm->lock must not be held; this is asserted inside the function. */ |
		r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc);
2676 | break; |
2677 | case KVM_PV_DISABLE: { |
2678 | r = -EINVAL; |
2679 | if (!kvm_s390_pv_is_protected(kvm)) |
2680 | break; |
2681 | |
		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
		/*
		 * If a CPU could not be destroyed, destroy VM will also fail.
		 * There is no point in trying to destroy it. Instead return
		 * the rc and rrc from the first CPU that failed destroying.
		 */
		if (r)
			break;
		r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc);
2691 | |
2692 | /* no need to block service interrupts any more */ |
2693 | clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); |
2694 | break; |
2695 | } |
2696 | case KVM_PV_SET_SEC_PARMS: { |
2697 | struct kvm_s390_pv_sec_parm parms = {}; |
2698 | void *hdr; |
2699 | |
2700 | r = -EINVAL; |
2701 | if (!kvm_s390_pv_is_protected(kvm)) |
2702 | break; |
2703 | |
2704 | r = -EFAULT; |
		if (copy_from_user(&parms, argp, sizeof(parms)))
2706 | break; |
2707 | |
2708 | /* Currently restricted to 8KB */ |
2709 | r = -EINVAL; |
2710 | if (parms.length > PAGE_SIZE * 2) |
2711 | break; |
2712 | |
2713 | r = -ENOMEM; |
		hdr = vmalloc(parms.length);
		if (!hdr)
			break;

		r = -EFAULT;
		if (!copy_from_user(hdr, (void __user *)parms.origin,
				    parms.length))
			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
						      &cmd->rc, &cmd->rrc);

		vfree(hdr);
2725 | break; |
2726 | } |
2727 | case KVM_PV_UNPACK: { |
2728 | struct kvm_s390_pv_unp unp = {}; |
2729 | |
2730 | r = -EINVAL; |
2731 | if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm)) |
2732 | break; |
2733 | |
2734 | r = -EFAULT; |
		if (copy_from_user(&unp, argp, sizeof(unp)))
			break;

		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
				       &cmd->rc, &cmd->rrc);
2740 | break; |
2741 | } |
2742 | case KVM_PV_VERIFY: { |
2743 | r = -EINVAL; |
2744 | if (!kvm_s390_pv_is_protected(kvm)) |
2745 | break; |
2746 | |
2747 | r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), |
2748 | UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc); |
2749 | KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x" , cmd->rc, |
2750 | cmd->rrc); |
2751 | break; |
2752 | } |
2753 | case KVM_PV_PREP_RESET: { |
2754 | r = -EINVAL; |
2755 | if (!kvm_s390_pv_is_protected(kvm)) |
2756 | break; |
2757 | |
2758 | r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), |
2759 | UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc); |
2760 | KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x" , |
2761 | cmd->rc, cmd->rrc); |
2762 | break; |
2763 | } |
2764 | case KVM_PV_UNSHARE_ALL: { |
2765 | r = -EINVAL; |
2766 | if (!kvm_s390_pv_is_protected(kvm)) |
2767 | break; |
2768 | |
2769 | r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), |
2770 | UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc); |
2771 | KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x" , |
2772 | cmd->rc, cmd->rrc); |
2773 | break; |
2774 | } |
2775 | case KVM_PV_INFO: { |
2776 | struct kvm_s390_pv_info info = {}; |
2777 | ssize_t data_len; |
2778 | |
2779 | /* |
2780 | * No need to check the VM protection here. |
2781 | * |
2782 | * Maybe user space wants to query some of the data |
2783 | * when the VM is still unprotected. If we see the |
2784 | * need to fence a new data command we can still |
2785 | * return an error in the info handler. |
2786 | */ |
2787 | |
2788 | r = -EFAULT; |
		if (copy_from_user(&info, argp, sizeof(info.header)))
			break;

		r = -EINVAL;
		if (info.header.len_max < sizeof(info.header))
			break;

		data_len = kvm_s390_handle_pv_info(&info);
2797 | if (data_len < 0) { |
2798 | r = data_len; |
2799 | break; |
2800 | } |
2801 | /* |
2802 | * If a data command struct is extended (multiple |
2803 | * times) this can be used to determine how much of it |
2804 | * is valid. |
2805 | */ |
2806 | info.header.len_written = data_len; |
2807 | |
2808 | r = -EFAULT; |
		if (copy_to_user(argp, &info, data_len))
2810 | break; |
2811 | |
2812 | r = 0; |
2813 | break; |
2814 | } |
2815 | case KVM_PV_DUMP: { |
2816 | struct kvm_s390_pv_dmp dmp; |
2817 | |
2818 | r = -EINVAL; |
2819 | if (!kvm_s390_pv_is_protected(kvm)) |
2820 | break; |
2821 | |
2822 | r = -EFAULT; |
		if (copy_from_user(&dmp, argp, sizeof(dmp)))
			break;

		r = kvm_s390_pv_dmp(kvm, cmd, dmp);
		if (r)
			break;

		if (copy_to_user(argp, &dmp, sizeof(dmp))) {
2831 | r = -EFAULT; |
2832 | break; |
2833 | } |
2834 | |
2835 | break; |
2836 | } |
2837 | default: |
2838 | r = -ENOTTY; |
2839 | } |
2840 | if (need_lock) |
		mutex_unlock(&kvm->lock);
2842 | |
2843 | return r; |
2844 | } |
2845 | |
2846 | static int mem_op_validate_common(struct kvm_s390_mem_op *mop, u64 supported_flags) |
2847 | { |
2848 | if (mop->flags & ~supported_flags || !mop->size) |
2849 | return -EINVAL; |
2850 | if (mop->size > MEM_OP_MAX_SIZE) |
2851 | return -E2BIG; |
2852 | if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) { |
2853 | if (mop->key > 0xf) |
2854 | return -EINVAL; |
2855 | } else { |
2856 | mop->key = 0; |
2857 | } |
2858 | return 0; |
2859 | } |
2860 | |
2861 | static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop) |
2862 | { |
2863 | void __user *uaddr = (void __user *)mop->buf; |
2864 | enum gacc_mode acc_mode; |
2865 | void *tmpbuf = NULL; |
2866 | int r, srcu_idx; |
2867 | |
2868 | r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION | |
2869 | KVM_S390_MEMOP_F_CHECK_ONLY); |
2870 | if (r) |
2871 | return r; |
2872 | |
2873 | if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { |
		tmpbuf = vmalloc(mop->size);
2875 | if (!tmpbuf) |
2876 | return -ENOMEM; |
2877 | } |
2878 | |
	srcu_idx = srcu_read_lock(&kvm->srcu);

	if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
		r = PGM_ADDRESSING;
		goto out_unlock;
	}

	acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE;
	if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
		r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key);
2889 | goto out_unlock; |
2890 | } |
2891 | if (acc_mode == GACC_FETCH) { |
		r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
					      mop->size, GACC_FETCH, mop->key);
		if (r)
			goto out_unlock;
		if (copy_to_user(uaddr, tmpbuf, mop->size))
			r = -EFAULT;
	} else {
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			goto out_unlock;
		}
		r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
					      mop->size, GACC_STORE, mop->key);
2905 | } |
2906 | |
2907 | out_unlock: |
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	vfree(tmpbuf);
2911 | return r; |
2912 | } |
2913 | |
2914 | static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop) |
2915 | { |
2916 | void __user *uaddr = (void __user *)mop->buf; |
2917 | void __user *old_addr = (void __user *)mop->old_addr; |
2918 | union { |
2919 | __uint128_t quad; |
2920 | char raw[sizeof(__uint128_t)]; |
	} old = { .quad = 0 }, new = { .quad = 0 };
2922 | unsigned int off_in_quad = sizeof(new) - mop->size; |
2923 | int r, srcu_idx; |
2924 | bool success; |
2925 | |
2926 | r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION); |
2927 | if (r) |
2928 | return r; |
2929 | /* |
2930 | * This validates off_in_quad. Checking that size is a power |
2931 | * of two is not necessary, as cmpxchg_guest_abs_with_key |
2932 | * takes care of that |
2933 | */ |
2934 | if (mop->size > sizeof(new)) |
2935 | return -EINVAL; |
	if (copy_from_user(&new.raw[off_in_quad], uaddr, mop->size))
		return -EFAULT;
	if (copy_from_user(&old.raw[off_in_quad], old_addr, mop->size))
2939 | return -EFAULT; |
2940 | |
	srcu_idx = srcu_read_lock(&kvm->srcu);

	if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
		r = PGM_ADDRESSING;
		goto out_unlock;
	}

	r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad,
				       new.quad, mop->key, &success);
	if (!success && copy_to_user(old_addr, &old.raw[off_in_quad], mop->size))
		r = -EFAULT;

out_unlock:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
2955 | return r; |
2956 | } |
2957 | |
2958 | static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop) |
2959 | { |
2960 | /* |
2961 | * This is technically a heuristic only, if the kvm->lock is not |
2962 | * taken, it is not guaranteed that the vm is/remains non-protected. |
2963 | * This is ok from a kernel perspective, wrongdoing is detected |
2964 | * on the access, -EFAULT is returned and the vm may crash the |
2965 | * next time it accesses the memory in question. |
	 * There is no sane use case to do switching and a memop on two
2967 | * different CPUs at the same time. |
2968 | */ |
2969 | if (kvm_s390_pv_get_handle(kvm)) |
2970 | return -EINVAL; |
2971 | |
2972 | switch (mop->op) { |
2973 | case KVM_S390_MEMOP_ABSOLUTE_READ: |
2974 | case KVM_S390_MEMOP_ABSOLUTE_WRITE: |
2975 | return kvm_s390_vm_mem_op_abs(kvm, mop); |
2976 | case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG: |
2977 | return kvm_s390_vm_mem_op_cmpxchg(kvm, mop); |
2978 | default: |
2979 | return -EINVAL; |
2980 | } |
2981 | } |
2982 | |
2983 | int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) |
2984 | { |
2985 | struct kvm *kvm = filp->private_data; |
2986 | void __user *argp = (void __user *)arg; |
2987 | struct kvm_device_attr attr; |
2988 | int r; |
2989 | |
2990 | switch (ioctl) { |
2991 | case KVM_S390_INTERRUPT: { |
2992 | struct kvm_s390_interrupt s390int; |
2993 | |
2994 | r = -EFAULT; |
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
2998 | break; |
2999 | } |
3000 | case KVM_CREATE_IRQCHIP: { |
3001 | struct kvm_irq_routing_entry routing; |
3002 | |
3003 | r = -EINVAL; |
3004 | if (kvm->arch.use_irqchip) { |
3005 | /* Set up dummy routing. */ |
3006 | memset(&routing, 0, sizeof(routing)); |
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
3008 | } |
3009 | break; |
3010 | } |
3011 | case KVM_SET_DEVICE_ATTR: { |
3012 | r = -EFAULT; |
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
3030 | break; |
3031 | } |
3032 | case KVM_S390_GET_SKEYS: { |
3033 | struct kvm_s390_skeys args; |
3034 | |
3035 | r = -EFAULT; |
3036 | if (copy_from_user(&args, argp, |
3037 | sizeof(struct kvm_s390_skeys))) |
3038 | break; |
		r = kvm_s390_get_skeys(kvm, &args);
3040 | break; |
3041 | } |
3042 | case KVM_S390_SET_SKEYS: { |
3043 | struct kvm_s390_skeys args; |
3044 | |
3045 | r = -EFAULT; |
3046 | if (copy_from_user(&args, argp, |
3047 | sizeof(struct kvm_s390_skeys))) |
3048 | break; |
		r = kvm_s390_set_skeys(kvm, &args);
3050 | break; |
3051 | } |
3052 | case KVM_S390_GET_CMMA_BITS: { |
3053 | struct kvm_s390_cmma_log args; |
3054 | |
3055 | r = -EFAULT; |
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
3063 | if (r) |
3064 | r = -EFAULT; |
3065 | } |
3066 | break; |
3067 | } |
3068 | case KVM_S390_SET_CMMA_BITS: { |
3069 | struct kvm_s390_cmma_log args; |
3070 | |
3071 | r = -EFAULT; |
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
3077 | break; |
3078 | } |
3079 | case KVM_S390_PV_COMMAND: { |
3080 | struct kvm_pv_cmd args; |
3081 | |
3082 | /* protvirt means user cpu state */ |
3083 | kvm_s390_set_user_cpu_state_ctrl(kvm); |
3084 | r = 0; |
3085 | if (!is_prot_virt_host()) { |
3086 | r = -EINVAL; |
3087 | break; |
3088 | } |
		if (copy_from_user(&args, argp, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		if (args.flags) {
			r = -EINVAL;
			break;
		}
		/* must be called without kvm->lock */
		r = kvm_s390_handle_pv(kvm, &args);
		if (copy_to_user(argp, &args, sizeof(args))) {
3100 | r = -EFAULT; |
3101 | break; |
3102 | } |
3103 | break; |
3104 | } |
3105 | case KVM_S390_MEM_OP: { |
3106 | struct kvm_s390_mem_op mem_op; |
3107 | |
		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_vm_mem_op(kvm, &mem_op);
3110 | else |
3111 | r = -EFAULT; |
3112 | break; |
3113 | } |
3114 | case KVM_S390_ZPCI_OP: { |
3115 | struct kvm_s390_zpci_op args; |
3116 | |
3117 | r = -EINVAL; |
3118 | if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) |
3119 | break; |
		if (copy_from_user(&args, argp, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		r = kvm_s390_pci_zpci_op(kvm, &args);
3125 | break; |
3126 | } |
3127 | default: |
3128 | r = -ENOTTY; |
3129 | } |
3130 | |
3131 | return r; |
3132 | } |
3133 | |
3134 | static int kvm_s390_apxa_installed(void) |
3135 | { |
3136 | struct ap_config_info info; |
3137 | |
3138 | if (ap_instructions_available()) { |
3139 | if (ap_qci(&info) == 0) |
3140 | return info.apxa; |
3141 | } |
3142 | |
3143 | return 0; |
3144 | } |
3145 | |
3146 | /* |
3147 | * The format of the crypto control block (CRYCB) is specified in the 3 low |
3148 | * order bits of the CRYCB designation (CRYCBD) field as follows: |
3149 | * Format 0: Neither the message security assist extension 3 (MSAX3) nor the |
3150 | * AP extended addressing (APXA) facility are installed. |
3151 | * Format 1: The APXA facility is not installed but the MSAX3 facility is. |
 * Format 2: Both the APXA and MSAX3 facilities are installed.
3153 | */ |
3154 | static void kvm_s390_set_crycb_format(struct kvm *kvm) |
3155 | { |
	kvm->arch.crypto.crycbd = virt_to_phys(kvm->arch.crypto.crycb);

	/* Clear the CRYCB format bits - i.e., set format 0 by default */
	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);

	/* Check whether MSAX3 is installed */
	if (!test_kvm_facility(kvm, 76))
3163 | return; |
3164 | |
3165 | if (kvm_s390_apxa_installed()) |
3166 | kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; |
3167 | else |
3168 | kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; |
3169 | } |
3170 | |
3171 | /* |
3172 | * kvm_arch_crypto_set_masks |
3173 | * |
3174 | * @kvm: pointer to the target guest's KVM struct containing the crypto masks |
3175 | * to be set. |
3176 | * @apm: the mask identifying the accessible AP adapters |
3177 | * @aqm: the mask identifying the accessible AP domains |
3178 | * @adm: the mask identifying the accessible AP control domains |
3179 | * |
3180 | * Set the masks that identify the adapters, domains and control domains to |
3181 | * which the KVM guest is granted access. |
3182 | * |
3183 | * Note: The kvm->lock mutex must be locked by the caller before invoking this |
3184 | * function. |
3185 | */ |
3186 | void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm, |
3187 | unsigned long *aqm, unsigned long *adm) |
3188 | { |
3189 | struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; |
3190 | |
3191 | kvm_s390_vcpu_block_all(kvm); |
3192 | |
3193 | switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { |
3194 | case CRYCB_FORMAT2: /* APCB1 uses 256 bits */ |
3195 | memcpy(crycb->apcb1.apm, apm, 32); |
3196 | VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx", |
3197 | apm[0], apm[1], apm[2], apm[3]); |
3198 | memcpy(crycb->apcb1.aqm, aqm, 32); |
3199 | VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx" , |
3200 | aqm[0], aqm[1], aqm[2], aqm[3]); |
3201 | memcpy(crycb->apcb1.adm, adm, 32); |
3202 | VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx" , |
3203 | adm[0], adm[1], adm[2], adm[3]); |
3204 | break; |
3205 | case CRYCB_FORMAT1: |
3206 | case CRYCB_FORMAT0: /* fall through - both use APCB0 */ |
3207 | memcpy(crycb->apcb0.apm, apm, 8); |
3208 | memcpy(crycb->apcb0.aqm, aqm, 2); |
3209 | memcpy(crycb->apcb0.adm, adm, 2); |
3210 | VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x" , |
3211 | apm[0], *((unsigned short *)aqm), |
3212 | *((unsigned short *)adm)); |
3213 | break; |
3214 | default: /* Can not happen */ |
3215 | break; |
3216 | } |
3217 | |
3218 | /* recreate the shadow crycb for each vcpu */ |
3219 | kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); |
3220 | kvm_s390_vcpu_unblock_all(kvm); |
3221 | } |
3222 | EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks); |
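/*
 * Usage sketch (illustrative only): how a caller such as an AP pass-through
 * driver might grant a guest access to adapter 3 on domain 0. The mask
 * values are arbitrary examples; kvm->lock must be held, as noted above.
 */
#if 0
static void example_grant_ap_access(struct kvm *kvm)
{
	DECLARE_BITMAP(apm, 256) = { 0 };
	DECLARE_BITMAP(aqm, 256) = { 0 };
	DECLARE_BITMAP(adm, 256) = { 0 };

	set_bit_inv(3, apm);	/* adapter 3, MSB-0 numbering */
	set_bit_inv(0, aqm);	/* usage domain 0 */
	set_bit_inv(0, adm);	/* control domain 0 */

	mutex_lock(&kvm->lock);
	kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);
	mutex_unlock(&kvm->lock);
}
#endif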
3223 | |
3224 | /* |
3225 | * kvm_arch_crypto_clear_masks |
3226 | * |
3227 | * @kvm: pointer to the target guest's KVM struct containing the crypto masks |
3228 | * to be cleared. |
3229 | * |
3230 | * Clear the masks that identify the adapters, domains and control domains to |
3231 | * which the KVM guest is granted access. |
3232 | * |
3233 | * Note: The kvm->lock mutex must be locked by the caller before invoking this |
3234 | * function. |
3235 | */ |
3236 | void kvm_arch_crypto_clear_masks(struct kvm *kvm) |
3237 | { |
3238 | kvm_s390_vcpu_block_all(kvm); |
3239 | |
3240 | memset(&kvm->arch.crypto.crycb->apcb0, 0, |
3241 | sizeof(kvm->arch.crypto.crycb->apcb0)); |
3242 | memset(&kvm->arch.crypto.crycb->apcb1, 0, |
3243 | sizeof(kvm->arch.crypto.crycb->apcb1)); |
3244 | |
3245 | VM_EVENT(kvm, 3, "%s" , "CLR CRYCB:" ); |
3246 | /* recreate the shadow crycb for each vcpu */ |
3247 | kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); |
3248 | kvm_s390_vcpu_unblock_all(kvm); |
3249 | } |
3250 | EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks); |
3251 | |
3252 | static u64 kvm_s390_get_initial_cpuid(void) |
3253 | { |
3254 | struct cpuid cpuid; |
3255 | |
3256 | get_cpu_id(&cpuid); |
3257 | cpuid.version = 0xff; |
3258 | return *((u64 *) &cpuid); |
3259 | } |
3260 | |
3261 | static void kvm_s390_crypto_init(struct kvm *kvm) |
3262 | { |
3263 | kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; |
3264 | kvm_s390_set_crycb_format(kvm); |
3265 | init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem); |
3266 | |
3267 | if (!test_kvm_facility(kvm, 76)) |
3268 | return; |
3269 | |
3270 | /* Enable AES/DEA protected key functions by default */ |
3271 | kvm->arch.crypto.aes_kw = 1; |
3272 | kvm->arch.crypto.dea_kw = 1; |
3273 | get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, |
3274 | sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); |
3275 | get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, |
3276 | sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); |
3277 | } |
3278 | |
3279 | static void sca_dispose(struct kvm *kvm) |
3280 | { |
3281 | if (kvm->arch.use_esca) |
3282 | free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); |
3283 | else |
3284 | free_page((unsigned long)(kvm->arch.sca)); |
3285 | kvm->arch.sca = NULL; |
3286 | } |
3287 | |
3288 | void kvm_arch_free_vm(struct kvm *kvm) |
3289 | { |
3290 | if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) |
3291 | kvm_s390_pci_clear_list(kvm); |
3292 | |
3293 | __kvm_arch_free_vm(kvm); |
3294 | } |
3295 | |
3296 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) |
3297 | { |
3298 | gfp_t alloc_flags = GFP_KERNEL_ACCOUNT; |
3299 | int i, rc; |
3300 | char debug_name[16]; |
3301 | static unsigned long sca_offset; |
3302 | |
3303 | rc = -EINVAL; |
3304 | #ifdef CONFIG_KVM_S390_UCONTROL |
3305 | if (type & ~KVM_VM_S390_UCONTROL) |
3306 | goto out_err; |
3307 | if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN))) |
3308 | goto out_err; |
3309 | #else |
3310 | if (type) |
3311 | goto out_err; |
3312 | #endif |
3313 | |
3314 | rc = s390_enable_sie(); |
3315 | if (rc) |
3316 | goto out_err; |
3317 | |
3318 | rc = -ENOMEM; |
3319 | |
3320 | if (!sclp.has_64bscao) |
3321 | alloc_flags |= GFP_DMA; |
3322 | rwlock_init(&kvm->arch.sca_lock); |
3323 | /* start with basic SCA */ |
3324 | kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); |
3325 | if (!kvm->arch.sca) |
3326 | goto out_err; |
3327 | mutex_lock(&kvm_lock); |
3328 | sca_offset += 16; |
3329 | if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE) |
3330 | sca_offset = 0; |
3331 | kvm->arch.sca = (struct bsca_block *) |
3332 | ((char *) kvm->arch.sca + sca_offset); |
3333 | mutex_unlock(&kvm_lock); |
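/*
 * Worked example of the staggering above: with 4 KiB pages, successive VMs
 * place their basic SCA at offsets 16, 32, 48, ... into the page they just
 * allocated, and sca_offset wraps back to 0 as soon as the next offset
 * plus sizeof(struct bsca_block) would cross the page boundary, so each
 * SCA always stays within a single page.
 */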
3334 | |
3335 | sprintf(debug_name, "kvm-%u", current->pid); |
3336 | |
3337 | kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); |
3338 | if (!kvm->arch.dbf) |
3339 | goto out_err; |
3340 | |
3341 | BUILD_BUG_ON(sizeof(struct sie_page2) != 4096); |
3342 | kvm->arch.sie_page2 = |
3343 | (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA); |
3344 | if (!kvm->arch.sie_page2) |
3345 | goto out_err; |
3346 | |
3347 | kvm->arch.sie_page2->kvm = kvm; |
3348 | kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; |
3349 | |
3350 | for (i = 0; i < kvm_s390_fac_size(); i++) { |
3351 | kvm->arch.model.fac_mask[i] = stfle_fac_list[i] & |
3352 | (kvm_s390_fac_base[i] | |
3353 | kvm_s390_fac_ext[i]); |
3354 | kvm->arch.model.fac_list[i] = stfle_fac_list[i] & |
3355 | kvm_s390_fac_base[i]; |
3356 | } |
3357 | kvm->arch.model.subfuncs = kvm_s390_available_subfunc; |
3358 | |
3359 | /* we are always in czam mode - even on pre z14 machines */ |
3360 | set_kvm_facility(kvm->arch.model.fac_mask, 138); |
3361 | set_kvm_facility(kvm->arch.model.fac_list, 138); |
3362 | /* we emulate STHYI in kvm */ |
3363 | set_kvm_facility(kvm->arch.model.fac_mask, 74); |
3364 | set_kvm_facility(kvm->arch.model.fac_list, 74); |
3365 | if (MACHINE_HAS_TLB_GUEST) { |
3366 | set_kvm_facility(kvm->arch.model.fac_mask, 147); |
3367 | set_kvm_facility(kvm->arch.model.fac_list, 147); |
3368 | } |
3369 | |
3370 | if (css_general_characteristics.aiv && test_facility(65)) |
3371 | set_kvm_facility(kvm->arch.model.fac_mask, 65); |
3372 | |
3373 | kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); |
3374 | kvm->arch.model.ibc = sclp.ibc & 0x0fff; |
3375 | |
3376 | kvm->arch.model.uv_feat_guest.feat = 0; |
3377 | |
3378 | kvm_s390_crypto_init(kvm); |
3379 | |
3380 | if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) { |
3381 | mutex_lock(&kvm->lock); |
3382 | kvm_s390_pci_init_list(kvm); |
3383 | kvm_s390_vcpu_pci_enable_interp(kvm); |
3384 | mutex_unlock(&kvm->lock); |
3385 | } |
3386 | |
3387 | mutex_init(&kvm->arch.float_int.ais_lock); |
3388 | spin_lock_init(&kvm->arch.float_int.lock); |
3389 | for (i = 0; i < FIRQ_LIST_COUNT; i++) |
3390 | INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); |
3391 | init_waitqueue_head(&kvm->arch.ipte_wq); |
3392 | mutex_init(&kvm->arch.ipte_mutex); |
3393 | |
3394 | debug_register_view(kvm->arch.dbf, &debug_sprintf_view); |
3395 | VM_EVENT(kvm, 3, "vm created with type %lu" , type); |
3396 | |
3397 | if (type & KVM_VM_S390_UCONTROL) { |
3398 | kvm->arch.gmap = NULL; |
3399 | kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; |
3400 | } else { |
3401 | if (sclp.hamax == U64_MAX) |
3402 | kvm->arch.mem_limit = TASK_SIZE_MAX; |
3403 | else |
3404 | kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, |
3405 | sclp.hamax + 1); |
3406 | kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); |
3407 | if (!kvm->arch.gmap) |
3408 | goto out_err; |
3409 | kvm->arch.gmap->private = kvm; |
3410 | kvm->arch.gmap->pfault_enabled = 0; |
3411 | } |
3412 | |
3413 | kvm->arch.use_pfmfi = sclp.has_pfmfi; |
3414 | kvm->arch.use_skf = sclp.has_skey; |
3415 | spin_lock_init(&kvm->arch.start_stop_lock); |
3416 | kvm_s390_vsie_init(kvm); |
3417 | if (use_gisa) |
3418 | kvm_s390_gisa_init(kvm); |
3419 | INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup); |
3420 | kvm->arch.pv.set_aside = NULL; |
3421 | KVM_EVENT(3, "vm 0x%pK created by pid %u" , kvm, current->pid); |
3422 | |
3423 | return 0; |
3424 | out_err: |
3425 | free_page((unsigned long)kvm->arch.sie_page2); |
3426 | debug_unregister(kvm->arch.dbf); |
3427 | sca_dispose(kvm); |
3428 | KVM_EVENT(3, "creation of vm failed: %d" , rc); |
3429 | return rc; |
3430 | } |
3431 | |
3432 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) |
3433 | { |
3434 | u16 rc, rrc; |
3435 | |
3436 | VCPU_EVENT(vcpu, 3, "%s" , "free cpu" ); |
3437 | trace_kvm_s390_destroy_vcpu(id: vcpu->vcpu_id); |
3438 | kvm_s390_clear_local_irqs(vcpu); |
3439 | kvm_clear_async_pf_completion_queue(vcpu); |
3440 | if (!kvm_is_ucontrol(vcpu->kvm)) |
3441 | sca_del_vcpu(vcpu); |
3442 | kvm_s390_update_topology_change_report(vcpu->kvm, 1); |
3443 | |
3444 | if (kvm_is_ucontrol(vcpu->kvm)) |
3445 | gmap_remove(vcpu->arch.gmap); |
3446 | |
3447 | if (vcpu->kvm->arch.use_cmma) |
3448 | kvm_s390_vcpu_unsetup_cmma(vcpu); |
3449 | /* We can not hold the vcpu mutex here, we are already dying */ |
3450 | if (kvm_s390_pv_cpu_get_handle(vcpu)) |
3451 | kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc); |
3452 | free_page((unsigned long)(vcpu->arch.sie_block)); |
3453 | } |
3454 | |
3455 | void kvm_arch_destroy_vm(struct kvm *kvm) |
3456 | { |
3457 | u16 rc, rrc; |
3458 | |
3459 | kvm_destroy_vcpus(kvm); |
3460 | sca_dispose(kvm); |
3461 | kvm_s390_gisa_destroy(kvm); |
3462 | /* |
3463 | * We are already at the end of life and kvm->lock is not taken. |
3464 | * This is ok as the file descriptor is closed by now and nobody |
3465 | * can mess with the pv state. |
3466 | */ |
3467 | kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc); |
3468 | /* |
3469 | * Remove the mmu notifier only when the whole KVM VM is torn down, |
3470 | * and only if one was registered to begin with. If the VM is |
3471 | * currently not protected, but has been previously been protected, |
3472 | * then it's possible that the notifier is still registered. |
3473 | */ |
3474 | if (kvm->arch.pv.mmu_notifier.ops) |
3475 | mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm); |
3476 | |
3477 | debug_unregister(kvm->arch.dbf); |
3478 | free_page((unsigned long)kvm->arch.sie_page2); |
3479 | if (!kvm_is_ucontrol(kvm)) |
3480 | gmap_remove(kvm->arch.gmap); |
3481 | kvm_s390_destroy_adapters(kvm); |
3482 | kvm_s390_clear_float_irqs(kvm); |
3483 | kvm_s390_vsie_destroy(kvm); |
3484 | KVM_EVENT(3, "vm 0x%pK destroyed" , kvm); |
3485 | } |
3486 | |
3487 | /* Section: vcpu related */ |
3488 | static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu) |
3489 | { |
3490 | vcpu->arch.gmap = gmap_create(current->mm, -1UL); |
3491 | if (!vcpu->arch.gmap) |
3492 | return -ENOMEM; |
3493 | vcpu->arch.gmap->private = vcpu->kvm; |
3494 | |
3495 | return 0; |
3496 | } |
3497 | |
3498 | static void sca_del_vcpu(struct kvm_vcpu *vcpu) |
3499 | { |
3500 | if (!kvm_s390_use_sca_entries()) |
3501 | return; |
3502 | read_lock(&vcpu->kvm->arch.sca_lock); |
3503 | if (vcpu->kvm->arch.use_esca) { |
3504 | struct esca_block *sca = vcpu->kvm->arch.sca; |
3505 | |
3506 | clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); |
3507 | sca->cpu[vcpu->vcpu_id].sda = 0; |
3508 | } else { |
3509 | struct bsca_block *sca = vcpu->kvm->arch.sca; |
3510 | |
3511 | clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); |
3512 | sca->cpu[vcpu->vcpu_id].sda = 0; |
3513 | } |
3514 | read_unlock(&vcpu->kvm->arch.sca_lock); |
3515 | } |
3516 | |
3517 | static void sca_add_vcpu(struct kvm_vcpu *vcpu) |
3518 | { |
3519 | if (!kvm_s390_use_sca_entries()) { |
3520 | phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca); |
3521 | |
3522 | /* we still need the basic sca for the ipte control */ |
3523 | vcpu->arch.sie_block->scaoh = sca_phys >> 32; |
3524 | vcpu->arch.sie_block->scaol = sca_phys; |
3525 | return; |
3526 | } |
3527 | read_lock(&vcpu->kvm->arch.sca_lock); |
3528 | if (vcpu->kvm->arch.use_esca) { |
3529 | struct esca_block *sca = vcpu->kvm->arch.sca; |
3530 | phys_addr_t sca_phys = virt_to_phys(sca); |
3531 | |
3532 | sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block); |
3533 | vcpu->arch.sie_block->scaoh = sca_phys >> 32; |
3534 | vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK; |
3535 | vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; |
3536 | set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); |
3537 | } else { |
3538 | struct bsca_block *sca = vcpu->kvm->arch.sca; |
3539 | phys_addr_t sca_phys = virt_to_phys(sca); |
3540 | |
3541 | sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block); |
3542 | vcpu->arch.sie_block->scaoh = sca_phys >> 32; |
3543 | vcpu->arch.sie_block->scaol = sca_phys; |
3544 | set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); |
3545 | } |
3546 | read_unlock(&vcpu->kvm->arch.sca_lock); |
3547 | } |
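/*
 * Worked example for the scaoh/scaol split above: for an SCA at the
 * (made-up) physical address 0x0000000123456700, scaoh receives the high
 * word 0x00000001 and scaol the low word 0x23456700; in the ESCA case the
 * low word is additionally masked with ESCA_SCAOL_MASK to preserve the
 * alignment bits the hardware expects.
 */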
3548 | |
3549 | /* Basic SCA to Extended SCA data copy routines */ |
3550 | static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s) |
3551 | { |
3552 | d->sda = s->sda; |
3553 | d->sigp_ctrl.c = s->sigp_ctrl.c; |
3554 | d->sigp_ctrl.scn = s->sigp_ctrl.scn; |
3555 | } |
3556 | |
3557 | static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s) |
3558 | { |
3559 | int i; |
3560 | |
3561 | d->ipte_control = s->ipte_control; |
3562 | d->mcn[0] = s->mcn; |
3563 | for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++) |
3564 | sca_copy_entry(&d->cpu[i], &s->cpu[i]); |
3565 | } |
3566 | |
3567 | static int sca_switch_to_extended(struct kvm *kvm) |
3568 | { |
3569 | struct bsca_block *old_sca = kvm->arch.sca; |
3570 | struct esca_block *new_sca; |
3571 | struct kvm_vcpu *vcpu; |
3572 | unsigned long vcpu_idx; |
3573 | u32 scaol, scaoh; |
3574 | phys_addr_t new_sca_phys; |
3575 | |
3576 | if (kvm->arch.use_esca) |
3577 | return 0; |
3578 | |
3579 | new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO); |
3580 | if (!new_sca) |
3581 | return -ENOMEM; |
3582 | |
3583 | new_sca_phys = virt_to_phys(new_sca); |
3584 | scaoh = new_sca_phys >> 32; |
3585 | scaol = new_sca_phys & ESCA_SCAOL_MASK; |
3586 | |
3587 | kvm_s390_vcpu_block_all(kvm); |
3588 | write_lock(&kvm->arch.sca_lock); |
3589 | |
3590 | sca_copy_b_to_e(new_sca, old_sca); |
3591 | |
3592 | kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) { |
3593 | vcpu->arch.sie_block->scaoh = scaoh; |
3594 | vcpu->arch.sie_block->scaol = scaol; |
3595 | vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; |
3596 | } |
3597 | kvm->arch.sca = new_sca; |
3598 | kvm->arch.use_esca = 1; |
3599 | |
3600 | write_unlock(&kvm->arch.sca_lock); |
3601 | kvm_s390_vcpu_unblock_all(kvm); |
3602 | |
3603 | free_page((unsigned long)old_sca); |
3604 | |
3605 | VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)" , |
3606 | old_sca, kvm->arch.sca); |
3607 | return 0; |
3608 | } |
3609 | |
3610 | static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id) |
3611 | { |
3612 | int rc; |
3613 | |
3614 | if (!kvm_s390_use_sca_entries()) { |
3615 | if (id < KVM_MAX_VCPUS) |
3616 | return true; |
3617 | return false; |
3618 | } |
3619 | if (id < KVM_S390_BSCA_CPU_SLOTS) |
3620 | return true; |
3621 | if (!sclp.has_esca || !sclp.has_64bscao) |
3622 | return false; |
3623 | |
3624 | rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); |
3625 | |
3626 | return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS; |
3627 | } |
3628 | |
3629 | /* needs disabled preemption to protect from TOD sync and vcpu_load/put */ |
3630 | static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu) |
3631 | { |
3632 | WARN_ON_ONCE(vcpu->arch.cputm_start != 0); |
3633 | raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); |
3634 | vcpu->arch.cputm_start = get_tod_clock_fast(); |
3635 | raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); |
3636 | } |
3637 | |
3638 | /* needs disabled preemption to protect from TOD sync and vcpu_load/put */ |
3639 | static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu) |
3640 | { |
3641 | WARN_ON_ONCE(vcpu->arch.cputm_start == 0); |
3642 | raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); |
3643 | vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start; |
3644 | vcpu->arch.cputm_start = 0; |
3645 | raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); |
3646 | } |
3647 | |
3648 | /* needs disabled preemption to protect from TOD sync and vcpu_load/put */ |
3649 | static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu) |
3650 | { |
3651 | WARN_ON_ONCE(vcpu->arch.cputm_enabled); |
3652 | vcpu->arch.cputm_enabled = true; |
3653 | __start_cpu_timer_accounting(vcpu); |
3654 | } |
3655 | |
3656 | /* needs disabled preemption to protect from TOD sync and vcpu_load/put */ |
3657 | static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu) |
3658 | { |
3659 | WARN_ON_ONCE(!vcpu->arch.cputm_enabled); |
3660 | __stop_cpu_timer_accounting(vcpu); |
3661 | vcpu->arch.cputm_enabled = false; |
3662 | } |
3663 | |
3664 | static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu) |
3665 | { |
3666 | preempt_disable(); /* protect from TOD sync and vcpu_load/put */ |
3667 | __enable_cpu_timer_accounting(vcpu); |
3668 | preempt_enable(); |
3669 | } |
3670 | |
3671 | static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu) |
3672 | { |
3673 | preempt_disable(); /* protect from TOD sync and vcpu_load/put */ |
3674 | __disable_cpu_timer_accounting(vcpu); |
3675 | preempt_enable(); |
3676 | } |
3677 | |
3678 | /* set the cpu timer - may only be called from the VCPU thread itself */ |
3679 | void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm) |
3680 | { |
3681 | preempt_disable(); /* protect from TOD sync and vcpu_load/put */ |
3682 | raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); |
3683 | if (vcpu->arch.cputm_enabled) |
3684 | vcpu->arch.cputm_start = get_tod_clock_fast(); |
3685 | vcpu->arch.sie_block->cputm = cputm; |
3686 | raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); |
3687 | preempt_enable(); |
3688 | } |
3689 | |
3690 | /* update and get the cpu timer - can also be called from other VCPU threads */ |
3691 | __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu) |
3692 | { |
3693 | unsigned int seq; |
3694 | __u64 value; |
3695 | |
3696 | if (unlikely(!vcpu->arch.cputm_enabled)) |
3697 | return vcpu->arch.sie_block->cputm; |
3698 | |
3699 | preempt_disable(); /* protect from TOD sync and vcpu_load/put */ |
3700 | do { |
3701 | seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount); |
3702 | /* |
3703 | * If the writer would ever execute a read in the critical |
3704 | * section, e.g. in irq context, we have a deadlock. |
3705 | */ |
3706 | WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu); |
3707 | value = vcpu->arch.sie_block->cputm; |
3708 | /* if cputm_start is 0, accounting is being started/stopped */ |
3709 | if (likely(vcpu->arch.cputm_start)) |
3710 | value -= get_tod_clock_fast() - vcpu->arch.cputm_start; |
3711 | } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1)); |
3712 | preempt_enable(); |
3713 | return value; |
3714 | } |
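/*
 * Worked example for the reader above: while accounting is running, the
 * returned value is cputm - (now - cputm_start), i.e. the stored timer
 * minus what has been consumed since accounting (re)started. The seqcount
 * retry loop guarantees cputm and cputm_start are read as a consistent
 * pair even against a concurrent start/stop on the vcpu thread.
 */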
3715 | |
3716 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
3717 | { |
3718 | |
3719 | gmap_enable(vcpu->arch.enabled_gmap); |
3720 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING); |
3721 | if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) |
3722 | __start_cpu_timer_accounting(vcpu); |
3723 | vcpu->cpu = cpu; |
3724 | } |
3725 | |
3726 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) |
3727 | { |
3728 | vcpu->cpu = -1; |
3729 | if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) |
3730 | __stop_cpu_timer_accounting(vcpu); |
3731 | kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING); |
3732 | vcpu->arch.enabled_gmap = gmap_get_enabled(); |
3733 | gmap_disable(vcpu->arch.enabled_gmap); |
3734 | |
3735 | } |
3736 | |
3737 | void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) |
3738 | { |
3739 | mutex_lock(&vcpu->kvm->lock); |
3740 | preempt_disable(); |
3741 | vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; |
3742 | vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; |
3743 | preempt_enable(); |
3744 | mutex_unlock(&vcpu->kvm->lock); |
3745 | if (!kvm_is_ucontrol(vcpu->kvm)) { |
3746 | vcpu->arch.gmap = vcpu->kvm->arch.gmap; |
3747 | sca_add_vcpu(vcpu); |
3748 | } |
3749 | if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) |
3750 | vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; |
3751 | /* make vcpu_load load the right gmap on the first trigger */ |
3752 | vcpu->arch.enabled_gmap = vcpu->arch.gmap; |
3753 | } |
3754 | |
3755 | static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr) |
3756 | { |
3757 | if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) && |
3758 | test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo)) |
3759 | return true; |
3760 | return false; |
3761 | } |
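/*
 * Note on the *_bit_inv helpers used above: the PCKMO function codes are
 * numbered in the s390 MSB-0 convention. A minimal sketch of the idea,
 * assuming the usual XOR-based implementation:
 */
#if 0
static bool test_bit_msb0(unsigned long nr, const unsigned long *addr)
{
	/* MSB-0 bit nr maps to LSB-0 bit (nr ^ 63) within the same word */
	return test_bit(nr ^ (BITS_PER_LONG - 1), addr);
}
#endif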
3762 | |
3763 | static bool kvm_has_pckmo_ecc(struct kvm *kvm) |
3764 | { |
3765 | /* At least one ECC subfunction must be present */ |
3766 | return kvm_has_pckmo_subfunc(kvm, 32) || |
3767 | kvm_has_pckmo_subfunc(kvm, 33) || |
3768 | kvm_has_pckmo_subfunc(kvm, 34) || |
3769 | kvm_has_pckmo_subfunc(kvm, 40) || |
3770 | kvm_has_pckmo_subfunc(kvm, 41); |
3771 | |
3772 | } |
3773 | |
3774 | static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu) |
3775 | { |
3776 | /* |
3777 | * If the AP instructions are not being interpreted and the MSAX3 |
3778 | * facility is not configured for the guest, there is nothing to set up. |
3779 | */ |
3780 | if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) |
3781 | return; |
3782 | |
3783 | vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; |
3784 | vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); |
3785 | vcpu->arch.sie_block->eca &= ~ECA_APIE; |
3786 | vcpu->arch.sie_block->ecd &= ~ECD_ECC; |
3787 | |
3788 | if (vcpu->kvm->arch.crypto.apie) |
3789 | vcpu->arch.sie_block->eca |= ECA_APIE; |
3790 | |
3791 | /* Set up protected key support */ |
3792 | if (vcpu->kvm->arch.crypto.aes_kw) { |
3793 | vcpu->arch.sie_block->ecb3 |= ECB3_AES; |
3794 | /* ecc is also wrapped with AES key */ |
3795 | if (kvm_has_pckmo_ecc(vcpu->kvm)) |
3796 | vcpu->arch.sie_block->ecd |= ECD_ECC; |
3797 | } |
3798 | |
3799 | if (vcpu->kvm->arch.crypto.dea_kw) |
3800 | vcpu->arch.sie_block->ecb3 |= ECB3_DEA; |
3801 | } |
3802 | |
3803 | void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu) |
3804 | { |
3805 | free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo)); |
3806 | vcpu->arch.sie_block->cbrlo = 0; |
3807 | } |
3808 | |
3809 | int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu) |
3810 | { |
3811 | void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); |
3812 | |
3813 | if (!cbrlo_page) |
3814 | return -ENOMEM; |
3815 | |
3816 | vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page); |
3817 | return 0; |
3818 | } |
3819 | |
3820 | static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu) |
3821 | { |
3822 | struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; |
3823 | |
3824 | vcpu->arch.sie_block->ibc = model->ibc; |
3825 | if (test_kvm_facility(vcpu->kvm, 7)) |
3826 | vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list); |
3827 | } |
3828 | |
3829 | static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu) |
3830 | { |
3831 | int rc = 0; |
3832 | u16 uvrc, uvrrc; |
3833 | |
3834 | atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | |
3835 | CPUSTAT_SM | |
3836 | CPUSTAT_STOPPED); |
3837 | |
3838 | if (test_kvm_facility(vcpu->kvm, 78)) |
3839 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2); |
3840 | else if (test_kvm_facility(vcpu->kvm, 8)) |
3841 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED); |
3842 | |
3843 | kvm_s390_vcpu_setup_model(vcpu); |
3844 | |
3845 | /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */ |
3846 | if (MACHINE_HAS_ESOP) |
3847 | vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT; |
3848 | if (test_kvm_facility(vcpu->kvm, 9)) |
3849 | vcpu->arch.sie_block->ecb |= ECB_SRSI; |
3850 | if (test_kvm_facility(vcpu->kvm, 11)) |
3851 | vcpu->arch.sie_block->ecb |= ECB_PTF; |
3852 | if (test_kvm_facility(vcpu->kvm, 73)) |
3853 | vcpu->arch.sie_block->ecb |= ECB_TE; |
3854 | if (!kvm_is_ucontrol(vcpu->kvm)) |
3855 | vcpu->arch.sie_block->ecb |= ECB_SPECI; |
3856 | |
3857 | if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) |
3858 | vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI; |
3859 | if (test_kvm_facility(vcpu->kvm, 130)) |
3860 | vcpu->arch.sie_block->ecb2 |= ECB2_IEP; |
3861 | vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI; |
3862 | if (sclp.has_cei) |
3863 | vcpu->arch.sie_block->eca |= ECA_CEI; |
3864 | if (sclp.has_ib) |
3865 | vcpu->arch.sie_block->eca |= ECA_IB; |
3866 | if (sclp.has_siif) |
3867 | vcpu->arch.sie_block->eca |= ECA_SII; |
3868 | if (sclp.has_sigpif) |
3869 | vcpu->arch.sie_block->eca |= ECA_SIGPI; |
3870 | if (test_kvm_facility(vcpu->kvm, 129)) { |
3871 | vcpu->arch.sie_block->eca |= ECA_VX; |
3872 | vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; |
3873 | } |
3874 | if (test_kvm_facility(vcpu->kvm, 139)) |
3875 | vcpu->arch.sie_block->ecd |= ECD_MEF; |
3876 | if (test_kvm_facility(vcpu->kvm, 156)) |
3877 | vcpu->arch.sie_block->ecd |= ECD_ETOKENF; |
3878 | if (vcpu->arch.sie_block->gd) { |
3879 | vcpu->arch.sie_block->eca |= ECA_AIV; |
3880 | VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u" , |
3881 | vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id); |
3882 | } |
3883 | vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC; |
3884 | vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb); |
3885 | |
3886 | if (sclp.has_kss) |
3887 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS); |
3888 | else |
3889 | vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; |
3890 | |
3891 | if (vcpu->kvm->arch.use_cmma) { |
3892 | rc = kvm_s390_vcpu_setup_cmma(vcpu); |
3893 | if (rc) |
3894 | return rc; |
3895 | } |
3896 | hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
3897 | vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; |
3898 | |
3899 | vcpu->arch.sie_block->hpid = HPID_KVM; |
3900 | |
3901 | kvm_s390_vcpu_crypto_setup(vcpu); |
3902 | |
3903 | kvm_s390_vcpu_pci_setup(vcpu); |
3904 | |
3905 | mutex_lock(&vcpu->kvm->lock); |
3906 | if (kvm_s390_pv_is_protected(vcpu->kvm)) { |
3907 | rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc); |
3908 | if (rc) |
3909 | kvm_s390_vcpu_unsetup_cmma(vcpu); |
3910 | } |
3911 | mutex_unlock(&vcpu->kvm->lock); |
3912 | |
3913 | return rc; |
3914 | } |
3915 | |
3916 | int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) |
3917 | { |
3918 | if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id)) |
3919 | return -EINVAL; |
3920 | return 0; |
3921 | } |
3922 | |
3923 | int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) |
3924 | { |
3925 | struct sie_page *sie_page; |
3926 | int rc; |
3927 | |
3928 | BUILD_BUG_ON(sizeof(struct sie_page) != 4096); |
3929 | sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT); |
3930 | if (!sie_page) |
3931 | return -ENOMEM; |
3932 | |
3933 | vcpu->arch.sie_block = &sie_page->sie_block; |
3934 | vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb); |
3935 | |
3936 | /* the real guest size will always be smaller than msl */ |
3937 | vcpu->arch.sie_block->mso = 0; |
3938 | vcpu->arch.sie_block->msl = sclp.hamax; |
3939 | |
3940 | vcpu->arch.sie_block->icpua = vcpu->vcpu_id; |
3941 | spin_lock_init(&vcpu->arch.local_int.lock); |
3942 | vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm); |
3943 | seqcount_init(&vcpu->arch.cputm_seqcount); |
3944 | |
3945 | vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; |
3946 | kvm_clear_async_pf_completion_queue(vcpu); |
3947 | vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | |
3948 | KVM_SYNC_GPRS | |
3949 | KVM_SYNC_ACRS | |
3950 | KVM_SYNC_CRS | |
3951 | KVM_SYNC_ARCH0 | |
3952 | KVM_SYNC_PFAULT | |
3953 | KVM_SYNC_DIAG318; |
3954 | vcpu->arch.acrs_loaded = false; |
3955 | kvm_s390_set_prefix(vcpu, 0); |
3956 | if (test_kvm_facility(vcpu->kvm, 64)) |
3957 | vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; |
3958 | if (test_kvm_facility(vcpu->kvm, 82)) |
3959 | vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC; |
3960 | if (test_kvm_facility(vcpu->kvm, 133)) |
3961 | vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB; |
3962 | if (test_kvm_facility(vcpu->kvm, 156)) |
3963 | vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN; |
3964 | /* fprs can be synchronized via vrs, even if the guest has no vx. With |
3965 | * cpu_has_vx(), (load|store)_fpu_regs() will work with vrs format. |
3966 | */ |
3967 | if (cpu_has_vx()) |
3968 | vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; |
3969 | else |
3970 | vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS; |
3971 | |
3972 | if (kvm_is_ucontrol(vcpu->kvm)) { |
3973 | rc = __kvm_ucontrol_vcpu_init(vcpu); |
3974 | if (rc) |
3975 | goto out_free_sie_block; |
3976 | } |
3977 | |
3978 | VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK" , |
3979 | vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); |
3980 | trace_kvm_s390_create_vcpu(id: vcpu->vcpu_id, vcpu, sie_block: vcpu->arch.sie_block); |
3981 | |
3982 | rc = kvm_s390_vcpu_setup(vcpu); |
3983 | if (rc) |
3984 | goto out_ucontrol_uninit; |
3985 | |
3986 | kvm_s390_update_topology_change_report(vcpu->kvm, 1); |
3987 | return 0; |
3988 | |
3989 | out_ucontrol_uninit: |
3990 | if (kvm_is_ucontrol(vcpu->kvm)) |
3991 | gmap_remove(vcpu->arch.gmap); |
3992 | out_free_sie_block: |
3993 | free_page((unsigned long)(vcpu->arch.sie_block)); |
3994 | return rc; |
3995 | } |
3996 | |
3997 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) |
3998 | { |
3999 | clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); |
4000 | return kvm_s390_vcpu_has_irq(vcpu, 0); |
4001 | } |
4002 | |
4003 | bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) |
4004 | { |
4005 | return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE); |
4006 | } |
4007 | |
4008 | void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu) |
4009 | { |
4010 | atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); |
4011 | exit_sie(vcpu); |
4012 | } |
4013 | |
4014 | void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu) |
4015 | { |
4016 | atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); |
4017 | } |
4018 | |
4019 | static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu) |
4020 | { |
4021 | atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); |
4022 | exit_sie(vcpu); |
4023 | } |
4024 | |
4025 | bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu) |
4026 | { |
4027 | return atomic_read(&vcpu->arch.sie_block->prog20) & |
4028 | (PROG_BLOCK_SIE | PROG_REQUEST); |
4029 | } |
4030 | |
4031 | static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) |
4032 | { |
4033 | atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); |
4034 | } |
4035 | |
4036 | /* |
4037 | * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running. |
4038 | * If the CPU is not running (e.g. waiting as idle) the function will |
4039 | * return immediately. */ |
4040 | void exit_sie(struct kvm_vcpu *vcpu) |
4041 | { |
4042 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT); |
4043 | kvm_s390_vsie_kick(vcpu); |
4044 | while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) |
4045 | cpu_relax(); |
4046 | } |
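/*
 * Usage sketch for the blocking primitives above (illustrative only); this
 * is the pattern kvm_arch_crypto_set_masks() follows whenever SIE-visible
 * state must not be observed half-updated:
 *
 *	kvm_s390_vcpu_block_all(kvm);      // kick every vcpu out of SIE
 *	// ... update state that SIE reads, e.g. the CRYCB ...
 *	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
 *	kvm_s390_vcpu_unblock_all(kvm);    // let them re-enter SIE
 */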
4047 | |
4048 | /* Kick a guest cpu out of SIE to process a request synchronously */ |
4049 | void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu) |
4050 | { |
4051 | __kvm_make_request(req, vcpu); |
4052 | kvm_s390_vcpu_request(vcpu); |
4053 | } |
4054 | |
4055 | static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start, |
4056 | unsigned long end) |
4057 | { |
4058 | struct kvm *kvm = gmap->private; |
4059 | struct kvm_vcpu *vcpu; |
4060 | unsigned long prefix; |
4061 | unsigned long i; |
4062 | |
4063 | trace_kvm_s390_gmap_notifier(start, end, gmap_is_shadow(gmap)); |
4064 | |
4065 | if (gmap_is_shadow(gmap)) |
4066 | return; |
4067 | if (start >= 1UL << 31) |
4068 | /* We are only interested in prefix pages */ |
4069 | return; |
4070 | kvm_for_each_vcpu(i, vcpu, kvm) { |
4071 | /* match against both prefix pages */ |
4072 | prefix = kvm_s390_get_prefix(vcpu); |
4073 | if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) { |
4074 | VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx" , |
4075 | start, end); |
4076 | kvm_s390_sync_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu); |
4077 | } |
4078 | } |
4079 | } |
4080 | |
4081 | bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) |
4082 | { |
4083 | /* do not poll with more than halt_poll_max_steal percent of steal time */ |
4084 | if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >= |
4085 | READ_ONCE(halt_poll_max_steal)) { |
4086 | vcpu->stat.halt_no_poll_steal++; |
4087 | return true; |
4088 | } |
4089 | return false; |
4090 | } |
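/*
 * Worked example for the steal-time check above, assuming avg_steal_timer
 * is kept in CPU-timer/TOD units (4096 per microsecond): with HZ=100,
 * TICK_USEC is 10000, so TICK_USEC << 12 is one tick in those units. An
 * avg_steal_timer of 4096000 (1000 us stolen per tick) then evaluates to
 * 1000 * 100 / 10000 = 10 percent, and polling stops once this reaches
 * halt_poll_max_steal.
 */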
4091 | |
4092 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) |
4093 | { |
4094 | /* kvm common code refers to this, but never calls it */ |
4095 | BUG(); |
4096 | return 0; |
4097 | } |
4098 | |
4099 | static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, |
4100 | struct kvm_one_reg *reg) |
4101 | { |
4102 | int r = -EINVAL; |
4103 | |
4104 | switch (reg->id) { |
4105 | case KVM_REG_S390_TODPR: |
4106 | r = put_user(vcpu->arch.sie_block->todpr, |
4107 | (u32 __user *)reg->addr); |
4108 | break; |
4109 | case KVM_REG_S390_EPOCHDIFF: |
4110 | r = put_user(vcpu->arch.sie_block->epoch, |
4111 | (u64 __user *)reg->addr); |
4112 | break; |
4113 | case KVM_REG_S390_CPU_TIMER: |
4114 | r = put_user(kvm_s390_get_cpu_timer(vcpu), |
4115 | (u64 __user *)reg->addr); |
4116 | break; |
4117 | case KVM_REG_S390_CLOCK_COMP: |
4118 | r = put_user(vcpu->arch.sie_block->ckc, |
4119 | (u64 __user *)reg->addr); |
4120 | break; |
4121 | case KVM_REG_S390_PFTOKEN: |
4122 | r = put_user(vcpu->arch.pfault_token, |
4123 | (u64 __user *)reg->addr); |
4124 | break; |
4125 | case KVM_REG_S390_PFCOMPARE: |
4126 | r = put_user(vcpu->arch.pfault_compare, |
4127 | (u64 __user *)reg->addr); |
4128 | break; |
4129 | case KVM_REG_S390_PFSELECT: |
4130 | r = put_user(vcpu->arch.pfault_select, |
4131 | (u64 __user *)reg->addr); |
4132 | break; |
4133 | case KVM_REG_S390_PP: |
4134 | r = put_user(vcpu->arch.sie_block->pp, |
4135 | (u64 __user *)reg->addr); |
4136 | break; |
4137 | case KVM_REG_S390_GBEA: |
4138 | r = put_user(vcpu->arch.sie_block->gbea, |
4139 | (u64 __user *)reg->addr); |
4140 | break; |
4141 | default: |
4142 | break; |
4143 | } |
4144 | |
4145 | return r; |
4146 | } |
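/*
 * Userspace-side sketch (illustrative only) of exercising the handler
 * above and its SET counterpart below via the generic ONE_REG ioctls;
 * vcpu_fd is assumed to be an open vcpu file descriptor.
 */
#if 0
	struct kvm_one_reg reg;
	__u64 cputm;

	reg.id = KVM_REG_S390_CPU_TIMER;
	reg.addr = (__u64)&cputm;
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
		err(1, "KVM_GET_ONE_REG");

	cputm += 1000;	/* arbitrary adjustment */
	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg))
		err(1, "KVM_SET_ONE_REG");
#endif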
4147 | |
4148 | static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, |
4149 | struct kvm_one_reg *reg) |
4150 | { |
4151 | int r = -EINVAL; |
4152 | __u64 val; |
4153 | |
4154 | switch (reg->id) { |
4155 | case KVM_REG_S390_TODPR: |
4156 | r = get_user(vcpu->arch.sie_block->todpr, |
4157 | (u32 __user *)reg->addr); |
4158 | break; |
4159 | case KVM_REG_S390_EPOCHDIFF: |
4160 | r = get_user(vcpu->arch.sie_block->epoch, |
4161 | (u64 __user *)reg->addr); |
4162 | break; |
4163 | case KVM_REG_S390_CPU_TIMER: |
4164 | r = get_user(val, (u64 __user *)reg->addr); |
4165 | if (!r) |
4166 | kvm_s390_set_cpu_timer(vcpu, val); |
4167 | break; |
4168 | case KVM_REG_S390_CLOCK_COMP: |
4169 | r = get_user(vcpu->arch.sie_block->ckc, |
4170 | (u64 __user *)reg->addr); |
4171 | break; |
4172 | case KVM_REG_S390_PFTOKEN: |
4173 | r = get_user(vcpu->arch.pfault_token, |
4174 | (u64 __user *)reg->addr); |
4175 | if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) |
4176 | kvm_clear_async_pf_completion_queue(vcpu); |
4177 | break; |
4178 | case KVM_REG_S390_PFCOMPARE: |
4179 | r = get_user(vcpu->arch.pfault_compare, |
4180 | (u64 __user *)reg->addr); |
4181 | break; |
4182 | case KVM_REG_S390_PFSELECT: |
4183 | r = get_user(vcpu->arch.pfault_select, |
4184 | (u64 __user *)reg->addr); |
4185 | break; |
4186 | case KVM_REG_S390_PP: |
4187 | r = get_user(vcpu->arch.sie_block->pp, |
4188 | (u64 __user *)reg->addr); |
4189 | break; |
4190 | case KVM_REG_S390_GBEA: |
4191 | r = get_user(vcpu->arch.sie_block->gbea, |
4192 | (u64 __user *)reg->addr); |
4193 | break; |
4194 | default: |
4195 | break; |
4196 | } |
4197 | |
4198 | return r; |
4199 | } |
4200 | |
4201 | static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu) |
4202 | { |
4203 | vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI; |
4204 | vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; |
4205 | memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb)); |
4206 | |
4207 | kvm_clear_async_pf_completion_queue(vcpu); |
4208 | if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) |
4209 | kvm_s390_vcpu_stop(vcpu); |
4210 | kvm_s390_clear_local_irqs(vcpu); |
4211 | } |
4212 | |
4213 | static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) |
4214 | { |
4215 | /* Initial reset is a superset of the normal reset */ |
4216 | kvm_arch_vcpu_ioctl_normal_reset(vcpu); |
4217 | |
4218 | /* |
4219 | * This equals initial cpu reset in pop, but we don't switch to ESA. |
4220 | * We do not only reset the internal data, but also ... |
4221 | */ |
4222 | vcpu->arch.sie_block->gpsw.mask = 0; |
4223 | vcpu->arch.sie_block->gpsw.addr = 0; |
4224 | kvm_s390_set_prefix(vcpu, 0); |
4225 | kvm_s390_set_cpu_timer(vcpu, 0); |
4226 | vcpu->arch.sie_block->ckc = 0; |
4227 | memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr)); |
4228 | vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK; |
4229 | vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK; |
4230 | |
4231 | /* ... the data in sync regs */ |
4232 | memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs)); |
4233 | vcpu->run->s.regs.ckc = 0; |
4234 | vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK; |
4235 | vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK; |
4236 | vcpu->run->psw_addr = 0; |
4237 | vcpu->run->psw_mask = 0; |
4238 | vcpu->run->s.regs.todpr = 0; |
4239 | vcpu->run->s.regs.cputm = 0; |
4240 | vcpu->run->s.regs.ckc = 0; |
4241 | vcpu->run->s.regs.pp = 0; |
4242 | vcpu->run->s.regs.gbea = 1; |
4243 | vcpu->run->s.regs.fpc = 0; |
4244 | /* |
4245 | * Do not reset these registers in the protected case, as some of |
4246 | * them are overlaid and they are not accessible in this case |
4247 | * anyway. |
4248 | */ |
4249 | if (!kvm_s390_pv_cpu_is_protected(vcpu)) { |
4250 | vcpu->arch.sie_block->gbea = 1; |
4251 | vcpu->arch.sie_block->pp = 0; |
4252 | vcpu->arch.sie_block->fpf &= ~FPF_BPBC; |
4253 | vcpu->arch.sie_block->todpr = 0; |
4254 | } |
4255 | } |
4256 | |
4257 | static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu) |
4258 | { |
4259 | struct kvm_sync_regs *regs = &vcpu->run->s.regs; |
4260 | |
4261 | /* Clear reset is a superset of the initial reset */ |
4262 | kvm_arch_vcpu_ioctl_initial_reset(vcpu); |
4263 | |
4264 | memset(®s->gprs, 0, sizeof(regs->gprs)); |
4265 | memset(®s->vrs, 0, sizeof(regs->vrs)); |
4266 | memset(®s->acrs, 0, sizeof(regs->acrs)); |
4267 | memset(®s->gscb, 0, sizeof(regs->gscb)); |
4268 | |
4269 | regs->etoken = 0; |
4270 | regs->etoken_extension = 0; |
4271 | } |
4272 | |
4273 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
4274 | { |
4275 | vcpu_load(vcpu); |
4276 | memcpy(&vcpu->run->s.regs.gprs, ®s->gprs, sizeof(regs->gprs)); |
4277 | vcpu_put(vcpu); |
4278 | return 0; |
4279 | } |
4280 | |
4281 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
4282 | { |
4283 | vcpu_load(vcpu); |
4284 | memcpy(®s->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); |
4285 | vcpu_put(vcpu); |
4286 | return 0; |
4287 | } |
4288 | |
4289 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, |
4290 | struct kvm_sregs *sregs) |
4291 | { |
4292 | vcpu_load(vcpu); |
4293 | |
4294 | memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); |
4295 | memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); |
4296 | |
4297 | vcpu_put(vcpu); |
4298 | return 0; |
4299 | } |
4300 | |
4301 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |
4302 | struct kvm_sregs *sregs) |
4303 | { |
4304 | vcpu_load(vcpu); |
4305 | |
4306 | memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); |
4307 | memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); |
4308 | |
4309 | vcpu_put(vcpu); |
4310 | return 0; |
4311 | } |
4312 | |
4313 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
4314 | { |
4315 | int ret = 0; |
4316 | |
4317 | vcpu_load(vcpu); |
4318 | |
4319 | vcpu->run->s.regs.fpc = fpu->fpc; |
4320 | if (cpu_has_vx()) |
4321 | convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs, |
4322 | (freg_t *) fpu->fprs); |
4323 | else |
4324 | memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs)); |
4325 | |
4326 | vcpu_put(vcpu); |
4327 | return ret; |
4328 | } |
4329 | |
4330 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
4331 | { |
4332 | vcpu_load(vcpu); |
4333 | |
4334 | if (cpu_has_vx()) |
4335 | convert_vx_to_fp((freg_t *) fpu->fprs, |
4336 | (__vector128 *) vcpu->run->s.regs.vrs); |
4337 | else |
4338 | memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs)); |
4339 | fpu->fpc = vcpu->run->s.regs.fpc; |
4340 | |
4341 | vcpu_put(vcpu); |
4342 | return 0; |
4343 | } |
4344 | |
4345 | static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) |
4346 | { |
4347 | int rc = 0; |
4348 | |
4349 | if (!is_vcpu_stopped(vcpu)) |
4350 | rc = -EBUSY; |
4351 | else { |
4352 | vcpu->run->psw_mask = psw.mask; |
4353 | vcpu->run->psw_addr = psw.addr; |
4354 | } |
4355 | return rc; |
4356 | } |
4357 | |
4358 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
4359 | struct kvm_translation *tr) |
4360 | { |
4361 | return -EINVAL; /* not implemented yet */ |
4362 | } |
4363 | |
4364 | #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \ |
4365 | KVM_GUESTDBG_USE_HW_BP | \ |
4366 | KVM_GUESTDBG_ENABLE) |
4367 | |
4368 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
4369 | struct kvm_guest_debug *dbg) |
4370 | { |
4371 | int rc = 0; |
4372 | |
4373 | vcpu_load(vcpu); |
4374 | |
4375 | vcpu->guest_debug = 0; |
4376 | kvm_s390_clear_bp_data(vcpu); |
4377 | |
4378 | if (dbg->control & ~VALID_GUESTDBG_FLAGS) { |
4379 | rc = -EINVAL; |
4380 | goto out; |
4381 | } |
4382 | if (!sclp.has_gpere) { |
4383 | rc = -EINVAL; |
4384 | goto out; |
4385 | } |
4386 | |
4387 | if (dbg->control & KVM_GUESTDBG_ENABLE) { |
4388 | vcpu->guest_debug = dbg->control; |
4389 | /* enforce guest PER */ |
4390 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_P); |
4391 | |
4392 | if (dbg->control & KVM_GUESTDBG_USE_HW_BP) |
4393 | rc = kvm_s390_import_bp_data(vcpu, dbg); |
4394 | } else { |
4395 | kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P); |
4396 | vcpu->arch.guestdbg.last_bp = 0; |
4397 | } |
4398 | |
4399 | if (rc) { |
4400 | vcpu->guest_debug = 0; |
4401 | kvm_s390_clear_bp_data(vcpu); |
4402 | kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P); |
4403 | } |
4404 | |
4405 | out: |
4406 | vcpu_put(vcpu); |
4407 | return rc; |
4408 | } |
4409 | |
4410 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
4411 | struct kvm_mp_state *mp_state) |
4412 | { |
4413 | int ret; |
4414 | |
4415 | vcpu_load(vcpu); |
4416 | |
4417 | /* CHECK_STOP and LOAD are not supported yet */ |
4418 | ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED : |
4419 | KVM_MP_STATE_OPERATING; |
4420 | |
4421 | vcpu_put(vcpu); |
4422 | return ret; |
4423 | } |
4424 | |
4425 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
4426 | struct kvm_mp_state *mp_state) |
4427 | { |
4428 | int rc = 0; |
4429 | |
4430 | vcpu_load(vcpu); |
4431 | |
4432 | /* user space knows about this interface - let it control the state */ |
4433 | kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm); |
4434 | |
4435 | switch (mp_state->mp_state) { |
4436 | case KVM_MP_STATE_STOPPED: |
4437 | rc = kvm_s390_vcpu_stop(vcpu); |
4438 | break; |
4439 | case KVM_MP_STATE_OPERATING: |
4440 | rc = kvm_s390_vcpu_start(vcpu); |
4441 | break; |
4442 | case KVM_MP_STATE_LOAD: |
4443 | if (!kvm_s390_pv_cpu_is_protected(vcpu)) { |
4444 | rc = -ENXIO; |
4445 | break; |
4446 | } |
4447 | rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD); |
4448 | break; |
4449 | case KVM_MP_STATE_CHECK_STOP: |
4450 | fallthrough; /* CHECK_STOP and LOAD are not supported yet */ |
4451 | default: |
4452 | rc = -ENXIO; |
4453 | } |
4454 | |
4455 | vcpu_put(vcpu); |
4456 | return rc; |
4457 | } |
4458 | |
4459 | static bool ibs_enabled(struct kvm_vcpu *vcpu) |
4460 | { |
4461 | return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS); |
4462 | } |
4463 | |
4464 | static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) |
4465 | { |
4466 | retry: |
4467 | kvm_s390_vcpu_request_handled(vcpu); |
4468 | if (!kvm_request_pending(vcpu)) |
4469 | return 0; |
4470 | /* |
4471 | * If the guest prefix changed, re-arm the ipte notifier for the |
4472 | * guest prefix page. gmap_mprotect_notify will wait on the ptl lock. |
4473 | * This ensures that the ipte instruction for this request has |
4474 | * already finished. We might race against a second unmapper that |
4475 | * wants to set the blocking bit. Let's just retry the request loop. |
4476 | */ |
4477 | if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) { |
4478 | int rc; |
4479 | rc = gmap_mprotect_notify(vcpu->arch.gmap, |
4480 | kvm_s390_get_prefix(vcpu), |
4481 | PAGE_SIZE * 2, PROT_WRITE); |
4482 | if (rc) { |
4483 | kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu); |
4484 | return rc; |
4485 | } |
4486 | goto retry; |
4487 | } |
4488 | |
4489 | if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { |
4490 | vcpu->arch.sie_block->ihcpu = 0xffff; |
4491 | goto retry; |
4492 | } |
4493 | |
4494 | if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { |
4495 | if (!ibs_enabled(vcpu)) { |
4496 | trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); |
4497 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS); |
4498 | } |
4499 | goto retry; |
4500 | } |
4501 | |
4502 | if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) { |
4503 | if (ibs_enabled(vcpu)) { |
4504 | trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); |
4505 | kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS); |
4506 | } |
4507 | goto retry; |
4508 | } |
4509 | |
4510 | if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) { |
4511 | vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; |
4512 | goto retry; |
4513 | } |
4514 | |
4515 | if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) { |
4516 | /* |
4517 | * Disable CMM virtualization; we will emulate the ESSA |
4518 | * instruction manually, in order to provide additional |
4519 | * functionalities needed for live migration. |
4520 | */ |
4521 | vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA; |
4522 | goto retry; |
4523 | } |
4524 | |
4525 | if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) { |
4526 | /* |
4527 | * Re-enable CMM virtualization if CMMA is available and |
4528 | * CMM has been used. |
4529 | */ |
4530 | if ((vcpu->kvm->arch.use_cmma) && |
4531 | (vcpu->kvm->mm->context.uses_cmm)) |
4532 | vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; |
4533 | goto retry; |
4534 | } |
4535 | |
4536 | /* we left the vsie handler, nothing to do, just clear the request */ |
4537 | kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu); |
4538 | |
4539 | return 0; |
4540 | } |
4541 | |
4542 | static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) |
4543 | { |
4544 | struct kvm_vcpu *vcpu; |
4545 | union tod_clock clk; |
4546 | unsigned long i; |
4547 | |
4548 | preempt_disable(); |
4549 | |
4550 | store_tod_clock_ext(&clk); |
4551 | |
4552 | kvm->arch.epoch = gtod->tod - clk.tod; |
4553 | kvm->arch.epdx = 0; |
4554 | if (test_kvm_facility(kvm, 139)) { |
4555 | kvm->arch.epdx = gtod->epoch_idx - clk.ei; |
4556 | if (kvm->arch.epoch > gtod->tod) |
4557 | kvm->arch.epdx -= 1; |
4558 | } |
4559 | |
4560 | kvm_s390_vcpu_block_all(kvm); |
4561 | kvm_for_each_vcpu(i, vcpu, kvm) { |
4562 | vcpu->arch.sie_block->epoch = kvm->arch.epoch; |
4563 | vcpu->arch.sie_block->epdx = kvm->arch.epdx; |
4564 | } |
4565 | |
4566 | kvm_s390_vcpu_unblock_all(kvm); |
4567 | preempt_enable(); |
4568 | } |
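/*
 * Worked example for the epoch computation above: the guest TOD is
 * host_tod + epoch in 64-bit modular arithmetic. If the requested guest
 * TOD (gtod->tod) lies before the current host TOD (clk.tod), the
 * subtraction wraps and epoch ends up numerically larger than gtod->tod;
 * the comparison above detects exactly this borrow and propagates it into
 * the epoch index (epdx) so the combined multi-word value stays exact.
 */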
4569 | |
4570 | int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) |
4571 | { |
4572 | if (!mutex_trylock(&kvm->lock)) |
4573 | return 0; |
4574 | __kvm_s390_set_tod_clock(kvm, gtod); |
4575 | mutex_unlock(&kvm->lock); |
4576 | return 1; |
4577 | } |
4578 | |
4579 | /** |
4580 | * kvm_arch_fault_in_page - fault-in guest page if necessary |
4581 | * @vcpu: The corresponding virtual cpu |
4582 | * @gpa: Guest physical address |
4583 | * @writable: Whether the page should be writable or not |
4584 | * |
4585 | * Make sure that a guest page has been faulted-in on the host. |
4586 | * |
4587 | * Return: Zero on success, negative error code otherwise. |
4588 | */ |
4589 | long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable) |
4590 | { |
4591 | return gmap_fault(vcpu->arch.gmap, gpa, |
4592 | writable ? FAULT_FLAG_WRITE : 0); |
4593 | } |
4594 | |
4595 | static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token, |
4596 | unsigned long token) |
4597 | { |
4598 | struct kvm_s390_interrupt inti; |
4599 | struct kvm_s390_irq irq; |
4600 | |
4601 | if (start_token) { |
4602 | irq.u.ext.ext_params2 = token; |
4603 | irq.type = KVM_S390_INT_PFAULT_INIT; |
4604 | WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq)); |
4605 | } else { |
4606 | inti.type = KVM_S390_INT_PFAULT_DONE; |
4607 | inti.parm64 = token; |
4608 | WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); |
4609 | } |
4610 | } |
4611 | |
4612 | bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, |
4613 | struct kvm_async_pf *work) |
4614 | { |
4615 | trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); |
4616 | __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); |
4617 | |
4618 | return true; |
4619 | } |
4620 | |
4621 | void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, |
4622 | struct kvm_async_pf *work) |
4623 | { |
4624 | trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); |
4625 | __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); |
4626 | } |
4627 | |
4628 | void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, |
4629 | struct kvm_async_pf *work) |
4630 | { |
4631 | /* s390 will always inject the page directly */ |
4632 | } |
4633 | |
4634 | bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu) |
4635 | { |
4636 | /* |
4637 | * s390 will always inject the page directly, |
4638 | * but we still want check_async_completion to clean up |
4639 | */ |
4640 | return true; |
4641 | } |
4642 | |
4643 | static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) |
4644 | { |
4645 | hva_t hva; |
4646 | struct kvm_arch_async_pf arch; |
4647 | |
4648 | if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) |
4649 | return false; |
4650 | if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != |
4651 | vcpu->arch.pfault_compare) |
4652 | return false; |
4653 | if (psw_extint_disabled(vcpu)) |
4654 | return false; |
4655 | if (kvm_s390_vcpu_has_irq(vcpu, 0)) |
4656 | return false; |
4657 | if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) |
4658 | return false; |
4659 | if (!vcpu->arch.gmap->pfault_enabled) |
4660 | return false; |
4661 | |
4662 | hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); |
4663 | hva += current->thread.gmap_addr & ~PAGE_MASK; |
4664 | if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) |
4665 | return false; |
4666 | |
4667 | return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); |
4668 | } |
4669 | |
4670 | static int vcpu_pre_run(struct kvm_vcpu *vcpu) |
4671 | { |
4672 | int rc, cpuflags; |
4673 | |
4674 | /* |
4675 | * On s390 notifications for arriving pages will be delivered directly |
4676 | * to the guest but the housekeeping for completed pfaults is |
4677 | * handled outside the worker. |
4678 | */ |
4679 | kvm_check_async_pf_completion(vcpu); |
4680 | |
4681 | vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14]; |
4682 | vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15]; |
4683 | |
4684 | if (need_resched()) |
4685 | schedule(); |
4686 | |
4687 | if (!kvm_is_ucontrol(vcpu->kvm)) { |
4688 | rc = kvm_s390_deliver_pending_interrupts(vcpu); |
4689 | if (rc || guestdbg_exit_pending(vcpu)) |
4690 | return rc; |
4691 | } |
4692 | |
4693 | rc = kvm_s390_handle_requests(vcpu); |
4694 | if (rc) |
4695 | return rc; |
4696 | |
4697 | if (guestdbg_enabled(vcpu)) { |
4698 | kvm_s390_backup_guest_per_regs(vcpu); |
4699 | kvm_s390_patch_guest_per_regs(vcpu); |
4700 | } |
4701 | |
4702 | clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); |
4703 | |
4704 | vcpu->arch.sie_block->icptcode = 0; |
4705 | cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); |
4706 | VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); |
4707 | trace_kvm_s390_sie_enter(vcpu, cpuflags); |
4708 | |
4709 | return 0; |
4710 | } |
4711 | |
4712 | static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu) |
4713 | { |
4714 | struct kvm_s390_pgm_info pgm_info = { |
4715 | .code = PGM_ADDRESSING, |
4716 | }; |
4717 | u8 opcode, ilen; |
4718 | int rc; |
4719 | |
4720 | VCPU_EVENT(vcpu, 3, "%s" , "fault in sie instruction" ); |
4721 | trace_kvm_s390_sie_fault(vcpu); |
4722 | |
4723 | /* |
4724 | * We want to inject an addressing exception, which is defined as a |
4725 | * suppressing or terminating exception. However, since we came here |
4726 | * by a DAT access exception, the PSW still points to the faulting |
4727 | * instruction since DAT exceptions are nullifying. So we've got |
4728 | * to look up the current opcode to get the length of the instruction |
4729 | * to be able to forward the PSW. |
4730 | */ |
4731 | rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1); |
4732 | ilen = insn_length(opcode); |
4733 | if (rc < 0) { |
4734 | return rc; |
4735 | } else if (rc) { |
4736 | /* Instruction-Fetching Exceptions - we can't detect the ilen. |
4737 | * Forward by arbitrary ilc, injection will take care of |
4738 | * nullification if necessary. |
4739 | */ |
4740 | pgm_info = vcpu->arch.pgm; |
4741 | ilen = 4; |
4742 | } |
4743 | pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID; |
4744 | kvm_s390_forward_psw(vcpu, ilen); |
4745 | return kvm_s390_inject_prog_irq(vcpu, &pgm_info); |
4746 | } |
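/*
 * Worked example for the PSW forwarding above: on s390 the two leftmost
 * bits of the first opcode byte encode the instruction length, so
 * insn_length() yields 2 for opcode bytes 0x00-0x3f, 4 for 0x40-0xbf and
 * 6 for 0xc0-0xff. A fault on, say, opcode byte 0xb2 therefore forwards
 * the PSW by 4 bytes before the addressing exception is injected.
 */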
4747 | |
4748 | static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) |
4749 | { |
4750 | struct mcck_volatile_info *mcck_info; |
4751 | struct sie_page *sie_page; |
4752 | |
4753 | VCPU_EVENT(vcpu, 6, "exit sie icptcode %d" , |
4754 | vcpu->arch.sie_block->icptcode); |
4755 | trace_kvm_s390_sie_exit(vcpu, icptcode: vcpu->arch.sie_block->icptcode); |
4756 | |
4757 | if (guestdbg_enabled(vcpu)) |
4758 | kvm_s390_restore_guest_per_regs(vcpu); |
4759 | |
4760 | vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14; |
4761 | vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15; |
4762 | |
4763 | if (exit_reason == -EINTR) { |
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
4765 | sie_page = container_of(vcpu->arch.sie_block, |
4766 | struct sie_page, sie_block); |
4767 | mcck_info = &sie_page->mcck_info; |
4768 | kvm_s390_reinject_machine_check(vcpu, mcck_info); |
4769 | return 0; |
4770 | } |
4771 | |
4772 | if (vcpu->arch.sie_block->icptcode > 0) { |
4773 | int rc = kvm_handle_sie_intercept(vcpu); |
4774 | |
4775 | if (rc != -EOPNOTSUPP) |
4776 | return rc; |
4777 | vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC; |
4778 | vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; |
4779 | vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; |
4780 | vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; |
4781 | return -EREMOTE; |
4782 | } else if (exit_reason != -EFAULT) { |
4783 | vcpu->stat.exit_null++; |
4784 | return 0; |
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
4786 | vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; |
4787 | vcpu->run->s390_ucontrol.trans_exc_code = |
4788 | current->thread.gmap_addr; |
4789 | vcpu->run->s390_ucontrol.pgm_code = 0x10; |
4790 | return -EREMOTE; |
4791 | } else if (current->thread.gmap_pfault) { |
4792 | trace_kvm_s390_major_guest_pfault(vcpu); |
4793 | current->thread.gmap_pfault = 0; |
4794 | if (kvm_arch_setup_async_pf(vcpu)) |
4795 | return 0; |
4796 | vcpu->stat.pfault_sync++; |
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
4798 | } |
4799 | return vcpu_post_run_fault_in_sie(vcpu); |
4800 | } |
4801 | |
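/*
 * The inner run loop: kvm->srcu is dropped and interrupts are disabled
 * around the actual sie64a() call; for protected guests the general
 * purpose registers are staged in and out of the SIE page because the
 * ultravisor owns the real register state. The loop runs until a signal
 * is pending, guest debugging wants to exit, or vcpu_pre_run() /
 * vcpu_post_run() report an error.
 */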
4802 | #define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK) |
4803 | static int __vcpu_run(struct kvm_vcpu *vcpu) |
4804 | { |
4805 | int rc, exit_reason; |
4806 | struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block; |
4807 | |
4808 | /* |
	 * We try to hold kvm->srcu during most of vcpu_run (except when
	 * running the guest), so that memslots (and other stuff) are protected
4811 | */ |
4812 | kvm_vcpu_srcu_read_lock(vcpu); |
4813 | |
4814 | do { |
4815 | rc = vcpu_pre_run(vcpu); |
4816 | if (rc || guestdbg_exit_pending(vcpu)) |
4817 | break; |
4818 | |
4819 | kvm_vcpu_srcu_read_unlock(vcpu); |
4820 | /* |
4821 | * As PF_VCPU will be used in fault handler, between |
4822 | * guest_enter and guest_exit should be no uaccess. |
4823 | */ |
4824 | local_irq_disable(); |
4825 | guest_enter_irqoff(); |
4826 | __disable_cpu_timer_accounting(vcpu); |
4827 | local_irq_enable(); |
4828 | if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
4829 | memcpy(sie_page->pv_grregs, |
4830 | vcpu->run->s.regs.gprs, |
4831 | sizeof(sie_page->pv_grregs)); |
4832 | } |
4833 | exit_reason = sie64a(vcpu->arch.sie_block, |
4834 | vcpu->run->s.regs.gprs); |
4835 | if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
4836 | memcpy(vcpu->run->s.regs.gprs, |
4837 | sie_page->pv_grregs, |
4838 | sizeof(sie_page->pv_grregs)); |
4839 | /* |
4840 | * We're not allowed to inject interrupts on intercepts |
4841 | * that leave the guest state in an "in-between" state |
4842 | * where the next SIE entry will do a continuation. |
4843 | * Fence interrupts in our "internal" PSW. |
4844 | */ |
4845 | if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR || |
4846 | vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) { |
4847 | vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; |
4848 | } |
4849 | } |
4850 | local_irq_disable(); |
4851 | __enable_cpu_timer_accounting(vcpu); |
4852 | guest_exit_irqoff(); |
4853 | local_irq_enable(); |
4854 | kvm_vcpu_srcu_read_lock(vcpu); |
4855 | |
4856 | rc = vcpu_post_run(vcpu, exit_reason); |
4857 | } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc); |
4858 | |
4859 | kvm_vcpu_srcu_read_unlock(vcpu); |
4860 | return rc; |
4861 | } |
4862 | |
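/*
 * Sync the format-2 only register state (TOD programmable register,
 * pfault parameters, diag318, runtime instrumentation, guarded storage,
 * BPBC) from kvm_run into the SIE block. Only called for non-protected
 * guests; for protected guests this state is managed by the ultravisor.
 */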
4863 | static void sync_regs_fmt2(struct kvm_vcpu *vcpu) |
4864 | { |
4865 | struct kvm_run *kvm_run = vcpu->run; |
4866 | struct runtime_instr_cb *riccb; |
4867 | struct gs_cb *gscb; |
4868 | |
4869 | riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb; |
4870 | gscb = (struct gs_cb *) &kvm_run->s.regs.gscb; |
4871 | vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; |
4872 | vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; |
4873 | if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { |
4874 | vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; |
4875 | vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; |
4876 | vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; |
4877 | } |
4878 | if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) { |
4879 | vcpu->arch.pfault_token = kvm_run->s.regs.pft; |
4880 | vcpu->arch.pfault_select = kvm_run->s.regs.pfs; |
4881 | vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; |
4882 | if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) |
4883 | kvm_clear_async_pf_completion_queue(vcpu); |
4884 | } |
4885 | if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) { |
4886 | vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318; |
4887 | vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc; |
		VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
4889 | } |
4890 | /* |
4891 | * If userspace sets the riccb (e.g. after migration) to a valid state, |
4892 | * we should enable RI here instead of doing the lazy enablement. |
4893 | */ |
4894 | if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) && |
4895 | test_kvm_facility(vcpu->kvm, 64) && |
4896 | riccb->v && |
4897 | !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) { |
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
4899 | vcpu->arch.sie_block->ecb3 |= ECB3_RI; |
4900 | } |
4901 | /* |
4902 | * If userspace sets the gscb (e.g. after migration) to non-zero, |
4903 | * we should enable GS here instead of doing the lazy enablement. |
4904 | */ |
4905 | if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) && |
4906 | test_kvm_facility(vcpu->kvm, 133) && |
4907 | gscb->gssm && |
4908 | !vcpu->arch.gs_enabled) { |
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
4910 | vcpu->arch.sie_block->ecb |= ECB_GS; |
4911 | vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; |
4912 | vcpu->arch.gs_enabled = 1; |
4913 | } |
4914 | if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) && |
4915 | test_kvm_facility(vcpu->kvm, 82)) { |
4916 | vcpu->arch.sie_block->fpf &= ~FPF_BPBC; |
4917 | vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0; |
4918 | } |
4919 | if (MACHINE_HAS_GS) { |
4920 | preempt_disable(); |
4921 | local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT); |
4922 | if (current->thread.gs_cb) { |
4923 | vcpu->arch.host_gscb = current->thread.gs_cb; |
4924 | save_gs_cb(vcpu->arch.host_gscb); |
4925 | } |
4926 | if (vcpu->arch.gs_enabled) { |
4927 | current->thread.gs_cb = (struct gs_cb *) |
4928 | &vcpu->run->s.regs.gscb; |
4929 | restore_gs_cb(current->thread.gs_cb); |
4930 | } |
4931 | preempt_enable(); |
4932 | } |
4933 | /* SIE will load etoken directly from SDNX and therefore kvm_run */ |
4934 | } |
4935 | |
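/*
 * Transfer the registers that userspace marked dirty in kvm_run into the
 * SIE block before entering the guest; store_regs() below performs the
 * inverse transfer on the way back out to userspace.
 */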
4936 | static void sync_regs(struct kvm_vcpu *vcpu) |
4937 | { |
4938 | struct kvm_run *kvm_run = vcpu->run; |
4939 | |
4940 | if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) |
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
4942 | if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { |
4943 | memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); |
4944 | /* some control register changes require a tlb flush */ |
4945 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); |
4946 | } |
4947 | if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { |
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
4949 | vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; |
4950 | } |
4951 | save_access_regs(vcpu->arch.host_acrs); |
4952 | restore_access_regs(vcpu->run->s.regs.acrs); |
4953 | vcpu->arch.acrs_loaded = true; |
	kvm_s390_fpu_load(vcpu->run);
4955 | /* Sync fmt2 only data */ |
4956 | if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) { |
4957 | sync_regs_fmt2(vcpu); |
4958 | } else { |
4959 | /* |
4960 | * In several places we have to modify our internal view to |
4961 | * not do things that are disallowed by the ultravisor. For |
4962 | * example we must not inject interrupts after specific exits |
4963 | * (e.g. 112 prefix page not secure). We do this by turning |
4964 | * off the machine check, external and I/O interrupt bits |
4965 | * of our PSW copy. To avoid getting validity intercepts, we |
		 * only accept the condition code from userspace.
4967 | */ |
4968 | vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC; |
4969 | vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask & |
4970 | PSW_MASK_CC; |
4971 | } |
4972 | |
4973 | kvm_run->kvm_dirty_regs = 0; |
4974 | } |
4975 | |
4976 | static void store_regs_fmt2(struct kvm_vcpu *vcpu) |
4977 | { |
4978 | struct kvm_run *kvm_run = vcpu->run; |
4979 | |
4980 | kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; |
4981 | kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; |
4982 | kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; |
4983 | kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; |
4984 | kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val; |
4985 | if (MACHINE_HAS_GS) { |
4986 | preempt_disable(); |
4987 | local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT); |
4988 | if (vcpu->arch.gs_enabled) |
4989 | save_gs_cb(current->thread.gs_cb); |
4990 | current->thread.gs_cb = vcpu->arch.host_gscb; |
4991 | restore_gs_cb(vcpu->arch.host_gscb); |
4992 | if (!vcpu->arch.host_gscb) |
4993 | local_ctl_clear_bit(2, CR2_GUARDED_STORAGE_BIT); |
4994 | vcpu->arch.host_gscb = NULL; |
4995 | preempt_enable(); |
4996 | } |
4997 | /* SIE will save etoken directly into SDNX and therefore kvm_run */ |
4998 | } |
4999 | |
5000 | static void store_regs(struct kvm_vcpu *vcpu) |
5001 | { |
5002 | struct kvm_run *kvm_run = vcpu->run; |
5003 | |
5004 | kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; |
5005 | kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; |
5006 | kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); |
5007 | memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); |
5008 | kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu); |
5009 | kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; |
5010 | kvm_run->s.regs.pft = vcpu->arch.pfault_token; |
5011 | kvm_run->s.regs.pfs = vcpu->arch.pfault_select; |
5012 | kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; |
5013 | save_access_regs(vcpu->run->s.regs.acrs); |
5014 | restore_access_regs(vcpu->arch.host_acrs); |
5015 | vcpu->arch.acrs_loaded = false; |
	kvm_s390_fpu_store(vcpu->run);
5017 | if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) |
5018 | store_regs_fmt2(vcpu); |
5019 | } |
5020 | |
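/*
 * Entry point for the KVM_RUN ioctl. A minimal sketch of the userspace
 * side, assuming an already created VCPU file descriptor vcpu_fd and the
 * mmap'ed kvm_run structure in "run" (both hypothetical names, as is
 * handle_sieic()):
 *
 *	while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sieic(run);	// userspace emulation
 *		else if (run->exit_reason == KVM_EXIT_INTR)
 *			break;			// interrupted by a signal
 *	}
 *
 * Internally: sync the dirty registers, run the SIE loop, map internal
 * error codes to exit reasons (-EREMOTE means kvm_run has been prepared
 * for userspace), and store the register state back into kvm_run.
 */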
5021 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) |
5022 | { |
5023 | struct kvm_run *kvm_run = vcpu->run; |
5024 | DECLARE_KERNEL_FPU_ONSTACK32(fpu); |
5025 | int rc; |
5026 | |
5027 | /* |
5028 | * Running a VM while dumping always has the potential to |
5029 | * produce inconsistent dump data. But for PV vcpus a SIE |
5030 | * entry while dumping could also lead to a fatal validity |
5031 | * intercept which we absolutely want to avoid. |
5032 | */ |
5033 | if (vcpu->kvm->arch.pv.dumping) |
5034 | return -EINVAL; |
5035 | |
5036 | if (kvm_run->immediate_exit) |
5037 | return -EINTR; |
5038 | |
5039 | if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS || |
5040 | kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS) |
5041 | return -EINVAL; |
5042 | |
5043 | vcpu_load(vcpu); |
5044 | |
5045 | if (guestdbg_exit_pending(vcpu)) { |
5046 | kvm_s390_prepare_debug_exit(vcpu); |
5047 | rc = 0; |
5048 | goto out; |
5049 | } |
5050 | |
5051 | kvm_sigset_activate(vcpu); |
5052 | |
5053 | /* |
5054 | * no need to check the return value of vcpu_start as it can only have |
5055 | * an error for protvirt, but protvirt means user cpu state |
5056 | */ |
5057 | if (!kvm_s390_user_cpu_state_ctrl(kvm: vcpu->kvm)) { |
5058 | kvm_s390_vcpu_start(vcpu); |
5059 | } else if (is_vcpu_stopped(vcpu)) { |
		pr_err_ratelimited("can't run stopped vcpu %d\n",
5061 | vcpu->vcpu_id); |
5062 | rc = -EINVAL; |
5063 | goto out; |
5064 | } |
5065 | |
5066 | kernel_fpu_begin(&fpu, KERNEL_FPC | KERNEL_VXR); |
5067 | sync_regs(vcpu); |
5068 | enable_cpu_timer_accounting(vcpu); |
5069 | |
5070 | might_fault(); |
5071 | rc = __vcpu_run(vcpu); |
5072 | |
5073 | if (signal_pending(current) && !rc) { |
5074 | kvm_run->exit_reason = KVM_EXIT_INTR; |
5075 | rc = -EINTR; |
5076 | } |
5077 | |
5078 | if (guestdbg_exit_pending(vcpu) && !rc) { |
5079 | kvm_s390_prepare_debug_exit(vcpu); |
5080 | rc = 0; |
5081 | } |
5082 | |
5083 | if (rc == -EREMOTE) { |
5084 | /* userspace support is needed, kvm_run has been prepared */ |
5085 | rc = 0; |
5086 | } |
5087 | |
5088 | disable_cpu_timer_accounting(vcpu); |
5089 | store_regs(vcpu); |
5090 | kernel_fpu_end(&fpu, KERNEL_FPC | KERNEL_VXR); |
5091 | |
5092 | kvm_sigset_deactivate(vcpu); |
5093 | |
5094 | vcpu->stat.exit_userspace++; |
5095 | out: |
5096 | vcpu_put(vcpu); |
5097 | return rc; |
5098 | } |
5099 | |
5100 | /* |
5101 | * store status at address |
 * we have two special cases:
5103 | * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit |
5104 | * KVM_S390_STORE_STATUS_PREFIXED: -> prefix |
5105 | */ |
5106 | int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) |
5107 | { |
5108 | unsigned char archmode = 1; |
5109 | freg_t fprs[NUM_FPRS]; |
5110 | unsigned int px; |
5111 | u64 clkcomp, cputm; |
5112 | int rc; |
5113 | |
5114 | px = kvm_s390_get_prefix(vcpu); |
5115 | if (gpa == KVM_S390_STORE_STATUS_NOADDR) { |
		if (write_guest_abs(vcpu, 163, &archmode, 1))
5117 | return -EFAULT; |
5118 | gpa = 0; |
5119 | } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) { |
		if (write_guest_real(vcpu, 163, &archmode, 1))
5121 | return -EFAULT; |
5122 | gpa = px; |
5123 | } else |
5124 | gpa -= __LC_FPREGS_SAVE_AREA; |
5125 | |
5126 | /* manually convert vector registers if necessary */ |
5127 | if (cpu_has_vx()) { |
5128 | convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs); |
5129 | rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA, |
5130 | fprs, 128); |
5131 | } else { |
5132 | rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA, |
5133 | vcpu->run->s.regs.fprs, 128); |
5134 | } |
5135 | rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA, |
5136 | vcpu->run->s.regs.gprs, 128); |
5137 | rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA, |
5138 | &vcpu->arch.sie_block->gpsw, 16); |
5139 | rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA, |
5140 | &px, 4); |
5141 | rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA, |
5142 | &vcpu->run->s.regs.fpc, 4); |
5143 | rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA, |
5144 | &vcpu->arch.sie_block->todpr, 4); |
5145 | cputm = kvm_s390_get_cpu_timer(vcpu); |
5146 | rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA, |
5147 | &cputm, 8); |
5148 | clkcomp = vcpu->arch.sie_block->ckc >> 8; |
5149 | rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA, |
5150 | &clkcomp, 8); |
5151 | rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA, |
5152 | &vcpu->run->s.regs.acrs, 64); |
5153 | rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA, |
5154 | &vcpu->arch.sie_block->gcr, 128); |
5155 | return rc ? -EFAULT : 0; |
5156 | } |
5157 | |
5158 | int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) |
5159 | { |
5160 | /* |
5161 | * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy |
5162 | * switch in the run ioctl. Let's update our copies before we save |
	 * them into the save area.
5164 | */ |
	kvm_s390_fpu_store(vcpu->run);
5166 | save_access_regs(vcpu->run->s.regs.acrs); |
5167 | |
	return kvm_s390_store_status_unloaded(vcpu, addr);
5169 | } |
5170 | |
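/*
 * IBS is a performance facility that only pays off while exactly one
 * VCPU is running: kvm_s390_vcpu_start() enables it for a lone VCPU and
 * disables it on all VCPUs as soon as a second one is started, while
 * kvm_s390_vcpu_stop() re-enables it when only one VCPU remains.
 */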
5171 | static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) |
5172 | { |
5173 | kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); |
5174 | kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu); |
5175 | } |
5176 | |
5177 | static void __disable_ibs_on_all_vcpus(struct kvm *kvm) |
5178 | { |
5179 | unsigned long i; |
5180 | struct kvm_vcpu *vcpu; |
5181 | |
5182 | kvm_for_each_vcpu(i, vcpu, kvm) { |
5183 | __disable_ibs_on_vcpu(vcpu); |
5184 | } |
5185 | } |
5186 | |
5187 | static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu) |
5188 | { |
5189 | if (!sclp.has_ibs) |
5190 | return; |
5191 | kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu); |
5192 | kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu); |
5193 | } |
5194 | |
5195 | int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) |
5196 | { |
5197 | int i, online_vcpus, r = 0, started_vcpus = 0; |
5198 | |
5199 | if (!is_vcpu_stopped(vcpu)) |
5200 | return 0; |
5201 | |
	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5206 | |
5207 | /* Let's tell the UV that we want to change into the operating state */ |
5208 | if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
5209 | r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR); |
5210 | if (r) { |
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5212 | return r; |
5213 | } |
5214 | } |
5215 | |
5216 | for (i = 0; i < online_vcpus; i++) { |
		if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
5218 | started_vcpus++; |
5219 | } |
5220 | |
5221 | if (started_vcpus == 0) { |
5222 | /* we're the only active VCPU -> speed it up */ |
5223 | __enable_ibs_on_vcpu(vcpu); |
5224 | } else if (started_vcpus == 1) { |
5225 | /* |
5226 | * As we are starting a second VCPU, we have to disable |
5227 | * the IBS facility on all VCPUs to remove potentially |
5228 | * outstanding ENABLE requests. |
5229 | */ |
		__disable_ibs_on_all_vcpus(vcpu->kvm);
5231 | } |
5232 | |
5233 | kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED); |
5234 | /* |
5235 | * The real PSW might have changed due to a RESTART interpreted by the |
5236 | * ultravisor. We block all interrupts and let the next sie exit |
5237 | * refresh our view. |
5238 | */ |
5239 | if (kvm_s390_pv_cpu_is_protected(vcpu)) |
5240 | vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; |
5241 | /* |
5242 | * Another VCPU might have used IBS while we were offline. |
5243 | * Let's play safe and flush the VCPU at startup. |
5244 | */ |
5245 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); |
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5247 | return 0; |
5248 | } |
5249 | |
5250 | int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) |
5251 | { |
5252 | int i, online_vcpus, r = 0, started_vcpus = 0; |
5253 | struct kvm_vcpu *started_vcpu = NULL; |
5254 | |
5255 | if (is_vcpu_stopped(vcpu)) |
5256 | return 0; |
5257 | |
	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5262 | |
5263 | /* Let's tell the UV that we want to change into the stopped state */ |
5264 | if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
5265 | r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP); |
5266 | if (r) { |
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5268 | return r; |
5269 | } |
5270 | } |
5271 | |
5272 | /* |
5273 | * Set the VCPU to STOPPED and THEN clear the interrupt flag, |
5274 | * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders |
5275 | * have been fully processed. This will ensure that the VCPU |
5276 | * is kept BUSY if another VCPU is inquiring with SIGP SENSE. |
5277 | */ |
5278 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED); |
5279 | kvm_s390_clear_stop_irq(vcpu); |
5280 | |
5281 | __disable_ibs_on_vcpu(vcpu); |
5282 | |
5283 | for (i = 0; i < online_vcpus; i++) { |
		struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);

		if (!is_vcpu_stopped(tmp)) {
5287 | started_vcpus++; |
5288 | started_vcpu = tmp; |
5289 | } |
5290 | } |
5291 | |
5292 | if (started_vcpus == 1) { |
5293 | /* |
5294 | * As we only have one VCPU left, we want to enable the |
5295 | * IBS facility for that VCPU to speed it up. |
5296 | */ |
		__enable_ibs_on_vcpu(started_vcpu);
5298 | } |
5299 | |
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5301 | return 0; |
5302 | } |
5303 | |
5304 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, |
5305 | struct kvm_enable_cap *cap) |
5306 | { |
5307 | int r; |
5308 | |
5309 | if (cap->flags) |
5310 | return -EINVAL; |
5311 | |
5312 | switch (cap->cap) { |
5313 | case KVM_CAP_S390_CSS_SUPPORT: |
5314 | if (!vcpu->kvm->arch.css_support) { |
5315 | vcpu->kvm->arch.css_support = 1; |
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
5318 | } |
5319 | r = 0; |
5320 | break; |
5321 | default: |
5322 | r = -EINVAL; |
5323 | break; |
5324 | } |
5325 | return r; |
5326 | } |
5327 | |
5328 | static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu, |
5329 | struct kvm_s390_mem_op *mop) |
5330 | { |
5331 | void __user *uaddr = (void __user *)mop->buf; |
5332 | void *sida_addr; |
5333 | int r = 0; |
5334 | |
5335 | if (mop->flags || !mop->size) |
5336 | return -EINVAL; |
5337 | if (mop->size + mop->sida_offset < mop->size) |
5338 | return -EINVAL; |
5339 | if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block)) |
5340 | return -E2BIG; |
5341 | if (!kvm_s390_pv_cpu_is_protected(vcpu)) |
5342 | return -EINVAL; |
5343 | |
5344 | sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset; |
5345 | |
5346 | switch (mop->op) { |
5347 | case KVM_S390_MEMOP_SIDA_READ: |
		if (copy_to_user(uaddr, sida_addr, mop->size))
5349 | r = -EFAULT; |
5350 | |
5351 | break; |
5352 | case KVM_S390_MEMOP_SIDA_WRITE: |
		if (copy_from_user(sida_addr, uaddr, mop->size))
5354 | r = -EFAULT; |
5355 | break; |
5356 | } |
5357 | return r; |
5358 | } |
5359 | |
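/*
 * Read or write guest logical memory on behalf of the KVM_S390_MEM_OP
 * ioctl. A minimal sketch of a userspace read, with vcpu_fd and buf as
 * hypothetical placeholders:
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x1000,			// guest logical address
 *		.size  = 512,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)buf,	// userspace destination
 *		.ar    = 0,				// access register 0
 *	};
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 *
 * KVM_S390_MEMOP_F_CHECK_ONLY only validates the access, and with
 * KVM_S390_MEMOP_F_INJECT_EXCEPTION a failing access is reflected into
 * the guest as a program interrupt.
 */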
5360 | static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu, |
5361 | struct kvm_s390_mem_op *mop) |
5362 | { |
5363 | void __user *uaddr = (void __user *)mop->buf; |
5364 | enum gacc_mode acc_mode; |
5365 | void *tmpbuf = NULL; |
5366 | int r; |
5367 | |
5368 | r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_INJECT_EXCEPTION | |
5369 | KVM_S390_MEMOP_F_CHECK_ONLY | |
5370 | KVM_S390_MEMOP_F_SKEY_PROTECTION); |
5371 | if (r) |
5372 | return r; |
5373 | if (mop->ar >= NUM_ACRS) |
5374 | return -EINVAL; |
5375 | if (kvm_s390_pv_cpu_is_protected(vcpu)) |
5376 | return -EINVAL; |
5377 | if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { |
		tmpbuf = vmalloc(mop->size);
5379 | if (!tmpbuf) |
5380 | return -ENOMEM; |
5381 | } |
5382 | |
5383 | acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE; |
5384 | if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { |
		r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
				    acc_mode, mop->key);
5387 | goto out_inject; |
5388 | } |
5389 | if (acc_mode == GACC_FETCH) { |
		r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
					mop->size, mop->key);
5392 | if (r) |
5393 | goto out_inject; |
		if (copy_to_user(uaddr, tmpbuf, mop->size)) {
5395 | r = -EFAULT; |
5396 | goto out_free; |
5397 | } |
5398 | } else { |
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
5400 | r = -EFAULT; |
5401 | goto out_free; |
5402 | } |
		r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
					 mop->size, mop->key);
5405 | } |
5406 | |
5407 | out_inject: |
5408 | if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0) |
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
5410 | |
5411 | out_free: |
	vfree(tmpbuf);
5413 | return r; |
5414 | } |
5415 | |
5416 | static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu, |
5417 | struct kvm_s390_mem_op *mop) |
5418 | { |
5419 | int r, srcu_idx; |
5420 | |
	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5422 | |
5423 | switch (mop->op) { |
5424 | case KVM_S390_MEMOP_LOGICAL_READ: |
5425 | case KVM_S390_MEMOP_LOGICAL_WRITE: |
5426 | r = kvm_s390_vcpu_mem_op(vcpu, mop); |
5427 | break; |
5428 | case KVM_S390_MEMOP_SIDA_READ: |
5429 | case KVM_S390_MEMOP_SIDA_WRITE: |
5430 | /* we are locked against sida going away by the vcpu->mutex */ |
5431 | r = kvm_s390_vcpu_sida_op(vcpu, mop); |
5432 | break; |
5433 | default: |
5434 | r = -EINVAL; |
5435 | } |
5436 | |
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
5438 | return r; |
5439 | } |
5440 | |
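/*
 * Async vcpu ioctls are invoked by the generic KVM code without taking
 * the vcpu mutex, so interrupt injection keeps working while the VCPU
 * is inside SIE; only the two injection ioctls are handled here.
 */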
5441 | long kvm_arch_vcpu_async_ioctl(struct file *filp, |
5442 | unsigned int ioctl, unsigned long arg) |
5443 | { |
5444 | struct kvm_vcpu *vcpu = filp->private_data; |
5445 | void __user *argp = (void __user *)arg; |
5446 | int rc; |
5447 | |
5448 | switch (ioctl) { |
5449 | case KVM_S390_IRQ: { |
5450 | struct kvm_s390_irq s390irq; |
5451 | |
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
5455 | break; |
5456 | } |
5457 | case KVM_S390_INTERRUPT: { |
5458 | struct kvm_s390_interrupt s390int; |
5459 | struct kvm_s390_irq s390irq = {}; |
5460 | |
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
5466 | break; |
5467 | } |
5468 | default: |
5469 | rc = -ENOIOCTLCMD; |
5470 | break; |
5471 | } |
5472 | |
5473 | /* |
5474 | * To simplify single stepping of userspace-emulated instructions, |
5475 | * KVM_EXIT_S390_SIEIC exit sets KVM_GUESTDBG_EXIT_PENDING (see |
5476 | * should_handle_per_ifetch()). However, if userspace emulation injects |
5477 | * an interrupt, it needs to be cleared, so that KVM_EXIT_DEBUG happens |
5478 | * after (and not before) the interrupt delivery. |
5479 | */ |
5480 | if (!rc) |
5481 | vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING; |
5482 | |
5483 | return rc; |
5484 | } |
5485 | |
5486 | static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu, |
5487 | struct kvm_pv_cmd *cmd) |
5488 | { |
5489 | struct kvm_s390_pv_dmp dmp; |
5490 | void *data; |
5491 | int ret; |
5492 | |
5493 | /* Dump initialization is a prerequisite */ |
5494 | if (!vcpu->kvm->arch.pv.dumping) |
5495 | return -EINVAL; |
5496 | |
	if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
5498 | return -EFAULT; |
5499 | |
5500 | /* We only handle this subcmd right now */ |
5501 | if (dmp.subcmd != KVM_PV_DUMP_CPU) |
5502 | return -EINVAL; |
5503 | |
5504 | /* CPU dump length is the same as create cpu storage donation. */ |
5505 | if (dmp.buff_len != uv_info.guest_cpu_stor_len) |
5506 | return -EINVAL; |
5507 | |
5508 | data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL); |
5509 | if (!data) |
5510 | return -ENOMEM; |
5511 | |
	ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);

	VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
		   vcpu->vcpu_id, cmd->rc, cmd->rrc);
5516 | |
5517 | if (ret) |
5518 | ret = -EINVAL; |
5519 | |
5520 | /* On success copy over the dump data */ |
5521 | if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len)) |
5522 | ret = -EFAULT; |
5523 | |
	kvfree(data);
5525 | return ret; |
5526 | } |
5527 | |
5528 | long kvm_arch_vcpu_ioctl(struct file *filp, |
5529 | unsigned int ioctl, unsigned long arg) |
5530 | { |
5531 | struct kvm_vcpu *vcpu = filp->private_data; |
5532 | void __user *argp = (void __user *)arg; |
5533 | int idx; |
5534 | long r; |
5535 | u16 rc, rrc; |
5536 | |
5537 | vcpu_load(vcpu); |
5538 | |
5539 | switch (ioctl) { |
5540 | case KVM_S390_STORE_STATUS: |
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_store_status_unloaded(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
5544 | break; |
5545 | case KVM_S390_SET_INITIAL_PSW: { |
5546 | psw_t psw; |
5547 | |
5548 | r = -EFAULT; |
5549 | if (copy_from_user(&psw, argp, sizeof(psw))) |
5550 | break; |
5551 | r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw); |
5552 | break; |
5553 | } |
5554 | case KVM_S390_CLEAR_RESET: |
5555 | r = 0; |
5556 | kvm_arch_vcpu_ioctl_clear_reset(vcpu); |
5557 | if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
5558 | r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), |
5559 | UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc); |
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
5561 | rc, rrc); |
5562 | } |
5563 | break; |
5564 | case KVM_S390_INITIAL_RESET: |
5565 | r = 0; |
5566 | kvm_arch_vcpu_ioctl_initial_reset(vcpu); |
5567 | if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
5568 | r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), |
5569 | UVC_CMD_CPU_RESET_INITIAL, |
5570 | &rc, &rrc); |
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
5572 | rc, rrc); |
5573 | } |
5574 | break; |
5575 | case KVM_S390_NORMAL_RESET: |
5576 | r = 0; |
5577 | kvm_arch_vcpu_ioctl_normal_reset(vcpu); |
5578 | if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
5579 | r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), |
5580 | UVC_CMD_CPU_RESET, &rc, &rrc); |
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
5582 | rc, rrc); |
5583 | } |
5584 | break; |
5585 | case KVM_SET_ONE_REG: |
5586 | case KVM_GET_ONE_REG: { |
		struct kvm_one_reg reg;

		r = -EINVAL;
		if (kvm_s390_pv_cpu_is_protected(vcpu))
			break;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
5598 | break; |
5599 | } |
5600 | #ifdef CONFIG_KVM_S390_UCONTROL |
5601 | case KVM_S390_UCAS_MAP: { |
5602 | struct kvm_s390_ucas_mapping ucasmap; |
5603 | |
5604 | if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { |
5605 | r = -EFAULT; |
5606 | break; |
5607 | } |
5608 | |
5609 | if (!kvm_is_ucontrol(vcpu->kvm)) { |
5610 | r = -EINVAL; |
5611 | break; |
5612 | } |
5613 | |
5614 | r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, |
5615 | ucasmap.vcpu_addr, ucasmap.length); |
5616 | break; |
5617 | } |
5618 | case KVM_S390_UCAS_UNMAP: { |
5619 | struct kvm_s390_ucas_mapping ucasmap; |
5620 | |
5621 | if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { |
5622 | r = -EFAULT; |
5623 | break; |
5624 | } |
5625 | |
5626 | if (!kvm_is_ucontrol(vcpu->kvm)) { |
5627 | r = -EINVAL; |
5628 | break; |
5629 | } |
5630 | |
5631 | r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, |
5632 | ucasmap.length); |
5633 | break; |
5634 | } |
5635 | #endif |
5636 | case KVM_S390_VCPU_FAULT: { |
5637 | r = gmap_fault(vcpu->arch.gmap, arg, 0); |
5638 | break; |
5639 | } |
5640 | case KVM_ENABLE_CAP: |
5641 | { |
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
5647 | break; |
5648 | } |
5649 | case KVM_S390_MEM_OP: { |
5650 | struct kvm_s390_mem_op mem_op; |
5651 | |
		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
5654 | else |
5655 | r = -EFAULT; |
5656 | break; |
5657 | } |
5658 | case KVM_S390_SET_IRQ_STATE: { |
5659 | struct kvm_s390_irq_state irq_state; |
5660 | |
5661 | r = -EFAULT; |
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5663 | break; |
5664 | if (irq_state.len > VCPU_IRQS_MAX_BUF || |
5665 | irq_state.len == 0 || |
5666 | irq_state.len % sizeof(struct kvm_s390_irq) > 0) { |
5667 | r = -EINVAL; |
5668 | break; |
5669 | } |
5670 | /* do not use irq_state.flags, it will break old QEMUs */ |
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
5674 | break; |
5675 | } |
5676 | case KVM_S390_GET_IRQ_STATE: { |
5677 | struct kvm_s390_irq_state irq_state; |
5678 | |
5679 | r = -EFAULT; |
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5681 | break; |
5682 | if (irq_state.len == 0) { |
5683 | r = -EINVAL; |
5684 | break; |
5685 | } |
5686 | /* do not use irq_state.flags, it will break old QEMUs */ |
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
5690 | break; |
5691 | } |
5692 | case KVM_S390_PV_CPU_COMMAND: { |
5693 | struct kvm_pv_cmd cmd; |
5694 | |
5695 | r = -EINVAL; |
5696 | if (!is_prot_virt_host()) |
5697 | break; |
5698 | |
5699 | r = -EFAULT; |
		if (copy_from_user(&cmd, argp, sizeof(cmd)))
5701 | break; |
5702 | |
5703 | r = -EINVAL; |
5704 | if (cmd.flags) |
5705 | break; |
5706 | |
5707 | /* We only handle this cmd right now */ |
5708 | if (cmd.cmd != KVM_PV_DUMP) |
5709 | break; |
5710 | |
		r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);
5712 | |
5713 | /* Always copy over UV rc / rrc data */ |
		if (copy_to_user((__u8 __user *)argp, &cmd.rc,
				 sizeof(cmd.rc) + sizeof(cmd.rrc)))
5716 | r = -EFAULT; |
5717 | break; |
5718 | } |
5719 | default: |
5720 | r = -ENOTTY; |
5721 | } |
5722 | |
5723 | vcpu_put(vcpu); |
5724 | return r; |
5725 | } |
5726 | |
5727 | vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) |
5728 | { |
5729 | #ifdef CONFIG_KVM_S390_UCONTROL |
5730 | if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) |
5731 | && (kvm_is_ucontrol(vcpu->kvm))) { |
5732 | vmf->page = virt_to_page(vcpu->arch.sie_block); |
5733 | get_page(vmf->page); |
5734 | return 0; |
5735 | } |
5736 | #endif |
5737 | return VM_FAULT_SIGBUS; |
5738 | } |
5739 | |
5740 | bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) |
5741 | { |
5742 | return true; |
5743 | } |
5744 | |
5745 | /* Section: memory related */ |
5746 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
5747 | const struct kvm_memory_slot *old, |
5748 | struct kvm_memory_slot *new, |
5749 | enum kvm_mr_change change) |
5750 | { |
5751 | gpa_t size; |
5752 | |
5753 | /* When we are protected, we should not change the memory slots */ |
5754 | if (kvm_s390_pv_get_handle(kvm)) |
5755 | return -EINVAL; |
5756 | |
5757 | if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) { |
5758 | /* |
5759 | * A few sanity checks. We can have memory slots which have to be |
5760 | * located/ended at a segment boundary (1MB). The memory in userland is |
5761 | * ok to be fragmented into various different vmas. It is okay to mmap() |
5762 | * and munmap() stuff in this slot after doing this call at any time |
5763 | */ |
5764 | |
5765 | if (new->userspace_addr & 0xffffful) |
5766 | return -EINVAL; |
5767 | |
5768 | size = new->npages * PAGE_SIZE; |
5769 | if (size & 0xffffful) |
5770 | return -EINVAL; |
5771 | |
5772 | if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit) |
5773 | return -EINVAL; |
5774 | } |
5775 | |
5776 | if (!kvm->arch.migration_mode) |
5777 | return 0; |
5778 | |
5779 | /* |
5780 | * Turn off migration mode when: |
5781 | * - userspace creates a new memslot with dirty logging off, |
5782 | * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and |
5783 | * dirty logging is turned off. |
	 * Migration mode expects dirty page logging to be enabled in order
	 * to store its dirty bitmap.
5786 | */ |
5787 | if (change != KVM_MR_DELETE && |
5788 | !(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) |
5789 | WARN(kvm_s390_vm_stop_migration(kvm), |
5790 | "Failed to stop migration mode" ); |
5791 | |
5792 | return 0; |
5793 | } |
5794 | |
5795 | void kvm_arch_commit_memory_region(struct kvm *kvm, |
5796 | struct kvm_memory_slot *old, |
5797 | const struct kvm_memory_slot *new, |
5798 | enum kvm_mr_change change) |
5799 | { |
5800 | int rc = 0; |
5801 | |
5802 | switch (change) { |
5803 | case KVM_MR_DELETE: |
5804 | rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, |
5805 | old->npages * PAGE_SIZE); |
5806 | break; |
5807 | case KVM_MR_MOVE: |
5808 | rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, |
5809 | old->npages * PAGE_SIZE); |
5810 | if (rc) |
5811 | break; |
5812 | fallthrough; |
5813 | case KVM_MR_CREATE: |
5814 | rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr, |
5815 | new->base_gfn * PAGE_SIZE, |
5816 | new->npages * PAGE_SIZE); |
5817 | break; |
5818 | case KVM_MR_FLAGS_ONLY: |
5819 | break; |
5820 | default: |
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
5822 | } |
5823 | if (rc) |
		pr_warn("failed to commit memory region\n");
5825 | return; |
5826 | } |
5827 | |
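/*
 * sclp.hmfai holds one two-bit code per facility word. To work through
 * the arithmetic: (hmfai << i * 2) >> 30 isolates the code for word i
 * from the most significant end, and each increment of that code shifts
 * 16 more bits out of the 48-bit base mask, so code 0 keeps all 48
 * facility bits of the word and code 3 masks the word entirely.
 */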
5828 | static inline unsigned long nonhyp_mask(int i) |
5829 | { |
5830 | unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30; |
5831 | |
5832 | return 0x0000ffffffffffffUL >> (nonhyp_fai << 4); |
5833 | } |
5834 | |
5835 | static int __init kvm_s390_init(void) |
5836 | { |
5837 | int i, r; |
5838 | |
5839 | if (!sclp.has_sief2) { |
		pr_info("SIE is not available\n");
5841 | return -ENODEV; |
5842 | } |
5843 | |
5844 | if (nested && hpage) { |
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
5846 | return -EINVAL; |
5847 | } |
5848 | |
5849 | for (i = 0; i < 16; i++) |
5850 | kvm_s390_fac_base[i] |= |
5851 | stfle_fac_list[i] & nonhyp_mask(i); |
5852 | |
5853 | r = __kvm_s390_init(); |
5854 | if (r) |
5855 | return r; |
5856 | |
	r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
5858 | if (r) { |
5859 | __kvm_s390_exit(); |
5860 | return r; |
5861 | } |
5862 | return 0; |
5863 | } |
5864 | |
5865 | static void __exit kvm_s390_exit(void) |
5866 | { |
5867 | kvm_exit(); |
5868 | |
5869 | __kvm_s390_exit(); |
5870 | } |
5871 | |
5872 | module_init(kvm_s390_init); |
5873 | module_exit(kvm_s390_exit); |
5874 | |
5875 | /* |
5876 | * Enable autoloading of the kvm module. |
5877 | * Note that we add the module alias here instead of virt/kvm/kvm_main.c |
5878 | * since x86 takes a different approach. |
5879 | */ |
5880 | #include <linux/miscdevice.h> |
5881 | MODULE_ALIAS_MISCDEV(KVM_MINOR); |
MODULE_ALIAS("devname:kvm");
5883 | |