// SPDX-License-Identifier: GPL-2.0
/*
 * in-kernel handling for sie intercepts
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/asm-offsets.h>
#include <asm/irq.h>
#include <asm/sysinfo.h>
#include <asm/uv.h>

#include "kvm-s390.h"
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"

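/* Length in bytes of the instruction that caused the SIE exit, 0 if unknown. */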
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
	u8 ilen = 0;

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* instruction only stored for these icptcodes */
		ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
		/* Use the length of the EXECUTE instruction if necessary */
		if (sie_block->icptstatus & 1) {
			ilen = (sie_block->icptstatus >> 4) & 0x6;
			if (!ilen)
				ilen = 4;
		}
		break;
	case ICPT_PROGI:
		/* bit 1+2 of pgmilc are the ilc, so we directly get ilen */
		ilen = vcpu->arch.sie_block->pgmilc & 0x6;
		break;
	}
	return ilen;
}

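/*
 * Handle a STOP intercept: once a stop request is actually pending, store
 * the CPU status if requested, stop the vcpu unless userspace controls the
 * cpu state itself, and exit to userspace.
 */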
static int handle_stop(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;
	uint8_t flags, stop_pending;

	vcpu->stat.exit_stop_request++;

	/* delay the stop if any non-stop irq is pending */
	if (kvm_s390_vcpu_has_irq(vcpu, 1))
		return 0;

	/* avoid races with the injection/SIGP STOP code */
	spin_lock(&li->lock);
	flags = li->irq.stop.flags;
	stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
	spin_unlock(&li->lock);

	trace_kvm_s390_stop_request(stop_pending, flags);
	if (!stop_pending)
		return 0;

	if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		if (rc)
			return rc;
	}

	/*
	 * no need to check the return value of vcpu_stop as it can only have
	 * an error for protvirt, but protvirt means user cpu state
	 */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	return -EOPNOTSUPP;
}

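/*
 * Validity intercepts indicate an inconsistent SIE control block state;
 * log the reason and report -EINVAL so the vcpu does not keep running.
 */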
static int handle_validity(struct kvm_vcpu *vcpu)
{
	int viwhy = vcpu->arch.sie_block->ipb >> 16;

	vcpu->stat.exit_validity++;
	trace_kvm_s390_intercept_validity(vcpu, viwhy);
	KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
		  current->pid, vcpu->kvm);

	/* do not warn on invalid runtime instrumentation mode */
	WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n",
		  viwhy);
	return -EINVAL;
}

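/* Dispatch an instruction intercept based on the major opcode in IPA. */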
static int handle_instruction(struct kvm_vcpu *vcpu)
{
	vcpu->stat.exit_instruction++;
	trace_kvm_s390_intercept_instruction(vcpu,
					     vcpu->arch.sie_block->ipa,
					     vcpu->arch.sie_block->ipb);

	switch (vcpu->arch.sie_block->ipa >> 8) {
	case 0x01:
		return kvm_s390_handle_01(vcpu);
	case 0x82:
		return kvm_s390_handle_lpsw(vcpu);
	case 0x83:
		return kvm_s390_handle_diag(vcpu);
	case 0xaa:
		return kvm_s390_handle_aa(vcpu);
	case 0xae:
		return kvm_s390_handle_sigp(vcpu);
	case 0xb2:
		return kvm_s390_handle_b2(vcpu);
	case 0xb6:
		return kvm_s390_handle_stctl(vcpu);
	case 0xb7:
		return kvm_s390_handle_lctl(vcpu);
	case 0xb9:
		return kvm_s390_handle_b9(vcpu);
	case 0xe3:
		return kvm_s390_handle_e3(vcpu);
	case 0xe5:
		return kvm_s390_handle_e5(vcpu);
	case 0xeb:
		return kvm_s390_handle_eb(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

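/*
 * Re-inject the program interruption that caused the intercept into the
 * guest, copying the additional exception fields from the SIE control block.
 */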
static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = vcpu->arch.sie_block->iprcc,
		/* the PSW has already been rewound */
		.flags = KVM_S390_PGM_FLAGS_NO_REWIND,
	};

	switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
	case PGM_SPACE_SWITCH:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		pgm_info.op_access_id = vcpu->arch.sie_block->oai;
		break;
	case PGM_MONITOR:
		pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
		pgm_info.mon_code = vcpu->arch.sie_block->tecmc;
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
		break;
	case PGM_PROTECTION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		break;
	default:
		break;
	}

	if (vcpu->arch.sie_block->iprcc & PGM_PER) {
		pgm_info.per_code = vcpu->arch.sie_block->perc;
		pgm_info.per_atmid = vcpu->arch.sie_block->peratmid;
		pgm_info.per_address = vcpu->arch.sie_block->peraddr;
		pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
	}
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

/*
 * restore ITDB to program-interruption TDB in guest lowcore
 * and set TX abort indication if required
 */
static int handle_itdb(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_itdb *itdb;
	int rc;

	if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
		return 0;
	if (current->thread.per_flags & PER_FLAG_NO_TE)
		return 0;
	itdb = phys_to_virt(vcpu->arch.sie_block->itdba);
	rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
	if (rc)
		return rc;
	memset(itdb, 0, sizeof(*itdb));

	return 0;
}

#define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)

static bool should_handle_per_event(const struct kvm_vcpu *vcpu)
{
	if (!guestdbg_enabled(vcpu) || !per_event(vcpu))
		return false;
	if (guestdbg_sstep_enabled(vcpu) &&
	    vcpu->arch.sie_block->iprcc != PGM_PER) {
		/*
		 * __vcpu_run() will exit after delivering the concurrently
		 * indicated condition.
		 */
		return false;
	}
	return true;
}

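/*
 * Handle a program interruption intercept: process guest-debugging PER
 * events first, then forward the program interruption to the guest.
 */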
static int handle_prog(struct kvm_vcpu *vcpu)
{
	psw_t psw;
	int rc;

	vcpu->stat.exit_program_interruption++;

	/*
	 * Intercept 8 indicates a loop of specification exceptions
	 * for protected guests.
	 */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EOPNOTSUPP;

	if (should_handle_per_event(vcpu)) {
		rc = kvm_s390_handle_per_event(vcpu);
		if (rc)
			return rc;
		/* the interrupt might have been filtered out completely */
		if (vcpu->arch.sie_block->iprcc == 0)
			return 0;
	}

	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
	if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
		rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
		if (rc)
			return rc;
		/* Avoid endless loops of specification exceptions */
		if (!is_valid_psw(&psw))
			return -EOPNOTSUPP;
	}
	rc = handle_itdb(vcpu);
	if (rc)
		return rc;

	return inject_prog_on_prog_intercept(vcpu);
}

/**
 * handle_external_interrupt - used for external interruption interceptions
 * @vcpu: virtual cpu
 *
 * This interception occurs if:
 * - the CPUSTAT_EXT_INT bit was already set when the external interrupt
 *   occurred. In this case, the interrupt needs to be injected manually to
 *   preserve interrupt priority.
 * - the external new PSW has external interrupts enabled, which will cause an
 *   interruption loop. We drop to userspace in this case.
 *
 * The latter case can be detected by inspecting the external mask bit in the
 * external new psw.
 *
 * Under PV, only the latter case can occur, since interrupt priorities are
 * handled in the ultravisor.
 */
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
	u16 eic = vcpu->arch.sie_block->eic;
	struct kvm_s390_irq irq;
	psw_t newpsw;
	int rc;

	vcpu->stat.exit_external_interrupt++;

	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		newpsw = vcpu->arch.sie_block->gpsw;
	} else {
		rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
		if (rc)
			return rc;
	}

	/*
	 * Clock comparator or timer interrupt with external interrupt enabled
	 * will cause interrupt loop. Drop to userspace.
	 */
	if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
	    (newpsw.mask & PSW_MASK_EXT))
		return -EOPNOTSUPP;

	switch (eic) {
	case EXT_IRQ_CLK_COMP:
		irq.type = KVM_S390_INT_CLOCK_COMP;
		break;
	case EXT_IRQ_CPU_TIMER:
		irq.type = KVM_S390_INT_CPU_TIMER;
		break;
	case EXT_IRQ_EXTERNAL_CALL:
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
		rc = kvm_s390_inject_vcpu(vcpu, &irq);
		/* ignore if another external call is already pending */
		if (rc == -EBUSY)
			return 0;
		return rc;
	default:
		return -EOPNOTSUPP;
	}

	return kvm_s390_inject_vcpu(vcpu, &irq);
}

/**
 * handle_mvpg_pei - Handle MOVE PAGE partial execution interception.
 * @vcpu: virtual cpu
 *
 * This interception can only happen for guests with DAT disabled and
 * addresses that are currently not mapped in the host. Thus we try to
 * set up the mappings for the corresponding user pages here (or throw
 * addressing exceptions in case of illegal guest addresses).
 */
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
	unsigned long srcaddr, dstaddr;
	int reg1, reg2, rc;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* Ensure that the source is paged-in, no actual access -> no key checking */
	rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg2],
					      reg2, &srcaddr, GACC_FETCH, 0);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
	if (rc != 0)
		return rc;

	/* Ensure that the destination is paged-in, no actual access -> no key checking */
	rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg1],
					      reg1, &dstaddr, GACC_STORE, 0);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
	if (rc != 0)
		return rc;

	kvm_s390_retry_instr(vcpu);

	return 0;
}

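/* Only MVPG and SIGP are expected to cause partial execution intercepts. */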
static int handle_partial_execution(struct kvm_vcpu *vcpu)
{
	vcpu->stat.exit_pei++;

	if (vcpu->arch.sie_block->ipa == 0xb254)	/* MVPG */
		return handle_mvpg_pei(vcpu);
	if (vcpu->arch.sie_block->ipa >> 8 == 0xae)	/* SIGP */
		return kvm_s390_handle_sigp_pei(vcpu);

	return -EOPNOTSUPP;
}

/*
 * Handle the sthyi instruction that provides the guest with system
 * information, like current CPU resources available at each level of
 * the machine.
 */
int handle_sthyi(struct kvm_vcpu *vcpu)
{
	int reg1, reg2, cc = 0, r = 0;
	u64 code, addr, rc = 0;
	struct sthyi_sctns *sctns = NULL;

	if (!test_kvm_facility(vcpu->kvm, 74))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
	code = vcpu->run->s.regs.gprs[reg1];
	addr = vcpu->run->s.regs.gprs[reg2];

	vcpu->stat.instruction_sthyi++;
	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
	trace_kvm_s390_handle_sthyi(vcpu, code, addr);

	if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (code & 0xffff) {
		cc = 3;
		rc = 4;
		goto out;
	}

	if (!kvm_s390_pv_cpu_is_protected(vcpu) && (addr & ~PAGE_MASK))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	sctns = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!sctns)
		return -ENOMEM;

	cc = sthyi_fill(sctns, &rc);
	if (cc < 0) {
		free_page((unsigned long)sctns);
		return cc;
	}
out:
	if (!cc) {
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(sida_addr(vcpu->arch.sie_block), sctns, PAGE_SIZE);
		} else {
			r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
			if (r) {
				free_page((unsigned long)sctns);
				return kvm_s390_inject_prog_cond(vcpu, r);
			}
		}
	}

	free_page((unsigned long)sctns);
	vcpu->run->s.regs.gprs[reg2 + 1] = rc;
	kvm_s390_set_psw_cc(vcpu, cc);
	return r;
}

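/*
 * Operation exception intercept: emulate STHYI, let userspace handle
 * instruction 0 if requested, or inject an operation exception unless
 * doing so would cause an exception loop.
 */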
static int handle_operexc(struct kvm_vcpu *vcpu)
{
	psw_t oldpsw, newpsw;
	int rc;

	vcpu->stat.exit_operation_exception++;
	trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
				      vcpu->arch.sie_block->ipb);

	if (vcpu->arch.sie_block->ipa == 0xb256)
		return handle_sthyi(vcpu);

	if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
		return -EOPNOTSUPP;
	rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
	if (rc)
		return rc;
	/*
	 * Avoid endless loops of operation exceptions, if the pgm new
	 * PSW will cause a new operation exception.
	 * The heuristic checks if the pgm new psw is within 6 bytes before
	 * the faulting psw address (with same DAT, AS settings) and the
	 * new psw is not a wait psw and the fault was not triggered by
	 * problem state.
	 */
	oldpsw = vcpu->arch.sie_block->gpsw;
	if (oldpsw.addr - newpsw.addr <= 6 &&
	    !(newpsw.mask & PSW_MASK_WAIT) &&
	    !(oldpsw.mask & PSW_MASK_PSTATE) &&
	    (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
	    (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT))
		return -EOPNOTSUPP;

	return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

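/*
 * A protected guest set a new prefix; the value is provided in the SIDA,
 * so mirror it into KVM's prefix tracking.
 */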
static int handle_pv_spx(struct kvm_vcpu *vcpu)
{
	u32 pref = *(u32 *)sida_addr(vcpu->arch.sie_block);

	kvm_s390_set_prefix(vcpu, pref);
	trace_kvm_s390_handle_prefix(vcpu, 1, pref);
	return 0;
}

static int handle_pv_sclp(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	/*
	 * 2 cases:
	 * a: an sccb answering interrupt was already pending or in flight.
	 *    As the sccb value is not known we can simply set some value to
	 *    trigger delivery of a saved SCCB. UV will then use its saved
	 *    copy of the SCCB value.
	 * b: an error SCCB interrupt needs to be injected so we also inject
	 *    a fake SCCB address. Firmware will use the proper one.
	 * This makes sure, that both errors and real sccb returns will only
	 * be delivered after a notification intercept (instruction has
	 * finished) but not after others.
	 */
	fi->srv_signal.ext_params |= 0x43000;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

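/*
 * Notification intercept for a guest Ultravisor call: after the guest has
 * removed shared access to a page, issue the unpin-shared UVC for that page.
 */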
static int handle_pv_uvc(struct kvm_vcpu *vcpu)
{
	struct uv_cb_share *guest_uvcb = sida_addr(vcpu->arch.sie_block);
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_UNPIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm),
		.gaddr = guest_uvcb->paddr,
	};
	int rc;

	if (guest_uvcb->header.cmd != UVC_CMD_REMOVE_SHARED_ACCESS) {
		WARN_ONCE(1, "Unexpected notification intercept for UVC 0x%x\n",
			  guest_uvcb->header.cmd);
		return 0;
	}
	rc = gmap_make_secure(vcpu->arch.gmap, uvcb.gaddr, &uvcb);
	/*
	 * If the unpin did not succeed, the guest will exit again for the UVC
	 * and we will retry the unpin.
	 */
	if (rc == -EINVAL)
		return 0;
	/*
	 * If we got -EAGAIN here, we simply return it. It will eventually
	 * get propagated all the way to userspace, which should then try
	 * again.
	 */
	return rc;
}

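/*
 * Dispatch a PV notification intercept (SET PREFIX, SCLP, UV call, SIGP);
 * anything else is handled like a regular instruction intercept.
 */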
static int handle_pv_notification(struct kvm_vcpu *vcpu)
{
	int ret;

	if (vcpu->arch.sie_block->ipa == 0xb210)
		return handle_pv_spx(vcpu);
	if (vcpu->arch.sie_block->ipa == 0xb220)
		return handle_pv_sclp(vcpu);
	if (vcpu->arch.sie_block->ipa == 0xb9a4)
		return handle_pv_uvc(vcpu);
	if (vcpu->arch.sie_block->ipa >> 8 == 0xae) {
		/*
		 * Besides external call, other SIGP orders also cause a
		 * 108 (pv notify) intercept. In contrast to external call,
		 * these orders need to be emulated and hence the appropriate
		 * place to handle them is in handle_instruction().
		 * So first try kvm_s390_handle_sigp_pei() and if that isn't
		 * successful, go on with handle_instruction().
		 */
		ret = kvm_s390_handle_sigp_pei(vcpu);
		if (!ret)
			return ret;
	}

	return handle_instruction(vcpu);
}

static bool should_handle_per_ifetch(const struct kvm_vcpu *vcpu, int rc)
{
	/* Process PER, also if the instruction is processed in user space. */
	if (!(vcpu->arch.sie_block->icptstatus & 0x02))
		return false;
	if (rc != 0 && rc != -EOPNOTSUPP)
		return false;
	if (guestdbg_sstep_enabled(vcpu) && vcpu->arch.local_int.pending_irqs)
		/* __vcpu_run() will exit after delivering the interrupt. */
		return false;
	return true;
}

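/*
 * Top-level SIE exit handler: route the intercept code to its handler and
 * process a pending PER instruction-fetch event before returning.
 */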
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
	int rc, per_rc = 0;

	if (kvm_is_ucontrol(vcpu->kvm))
		return -EOPNOTSUPP;

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_EXTREQ:
		vcpu->stat.exit_external_request++;
		return 0;
	case ICPT_IOREQ:
		vcpu->stat.exit_io_request++;
		return 0;
	case ICPT_INST:
		rc = handle_instruction(vcpu);
		break;
	case ICPT_PROGI:
		return handle_prog(vcpu);
	case ICPT_EXTINT:
		return handle_external_interrupt(vcpu);
	case ICPT_WAIT:
		return kvm_s390_handle_wait(vcpu);
	case ICPT_VALIDITY:
		return handle_validity(vcpu);
	case ICPT_STOP:
		return handle_stop(vcpu);
	case ICPT_OPEREXC:
		rc = handle_operexc(vcpu);
		break;
	case ICPT_PARTEXEC:
		rc = handle_partial_execution(vcpu);
		break;
	case ICPT_KSS:
		/* Instruction will be redriven, skip the PER check. */
		return kvm_s390_skey_check_enable(vcpu);
	case ICPT_MCHKREQ:
	case ICPT_INT_ENABLE:
		/*
		 * PSW bit 13 or a CR (0, 6, 14) changed and we might
		 * now be able to deliver interrupts. The pre-run code
		 * will take care of this.
		 */
		rc = 0;
		break;
	case ICPT_PV_INSTR:
		rc = handle_instruction(vcpu);
		break;
	case ICPT_PV_NOTIFY:
		rc = handle_pv_notification(vcpu);
		break;
	case ICPT_PV_PREF:
		rc = 0;
		gmap_convert_to_secure(vcpu->arch.gmap,
				       kvm_s390_get_prefix(vcpu));
		gmap_convert_to_secure(vcpu->arch.gmap,
				       kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (should_handle_per_ifetch(vcpu, rc))
		per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
	return per_rc ? per_rc : rc;
}