// SPDX-License-Identifier: GPL-2.0
/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
11 | |
12 | #include <linux/kvm.h> |
13 | #include <linux/kvm_host.h> |
14 | #include <linux/slab.h> |
15 | #include <asm/sigp.h> |
16 | #include "gaccess.h" |
17 | #include "kvm-s390.h" |
18 | #include "trace.h" |
19 | |
20 | static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, |
21 | u64 *reg) |
22 | { |
23 | const bool stopped = kvm_s390_test_cpuflags(vcpu: dst_vcpu, flags: CPUSTAT_STOPPED); |
24 | int rc; |
25 | int ext_call_pending; |
26 | |
27 | ext_call_pending = kvm_s390_ext_call_pending(vcpu: dst_vcpu); |
28 | if (!stopped && !ext_call_pending) |
29 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
30 | else { |
31 | *reg &= 0xffffffff00000000UL; |
32 | if (ext_call_pending) |
33 | *reg |= SIGP_STATUS_EXT_CALL_PENDING; |
34 | if (stopped) |
35 | *reg |= SIGP_STATUS_STOPPED; |
36 | rc = SIGP_CC_STATUS_STORED; |
37 | } |
38 | |
39 | VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x" , dst_vcpu->vcpu_id, |
40 | rc); |
41 | return rc; |
42 | } |
43 | |
44 | static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, |
45 | struct kvm_vcpu *dst_vcpu) |
46 | { |
47 | struct kvm_s390_irq irq = { |
48 | .type = KVM_S390_INT_EMERGENCY, |
49 | .u.emerg.code = vcpu->vcpu_id, |
50 | }; |
51 | int rc = 0; |
52 | |
53 | rc = kvm_s390_inject_vcpu(vcpu: dst_vcpu, irq: &irq); |
54 | if (!rc) |
55 | VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x" , |
56 | dst_vcpu->vcpu_id); |
57 | |
58 | return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED; |
59 | } |
60 | |
/* SIGP EMERGENCY SIGNAL: unconditional variant, just inject. */
static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	return __inject_sigp_emergency(vcpu, dst_vcpu);
}
65 | |
66 | static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, |
67 | struct kvm_vcpu *dst_vcpu, |
68 | u16 asn, u64 *reg) |
69 | { |
70 | const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT; |
71 | u16 p_asn, s_asn; |
72 | psw_t *psw; |
73 | bool idle; |
74 | |
75 | idle = is_vcpu_idle(vcpu); |
76 | psw = &dst_vcpu->arch.sie_block->gpsw; |
77 | p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */ |
78 | s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */ |
79 | |
80 | /* Inject the emergency signal? */ |
81 | if (!is_vcpu_stopped(vcpu) |
82 | || (psw->mask & psw_int_mask) != psw_int_mask |
83 | || (idle && psw->addr != 0) |
84 | || (!idle && (asn == p_asn || asn == s_asn))) { |
85 | return __inject_sigp_emergency(vcpu, dst_vcpu); |
86 | } else { |
87 | *reg &= 0xffffffff00000000UL; |
88 | *reg |= SIGP_STATUS_INCORRECT_STATE; |
89 | return SIGP_CC_STATUS_STORED; |
90 | } |
91 | } |
92 | |
93 | static int __sigp_external_call(struct kvm_vcpu *vcpu, |
94 | struct kvm_vcpu *dst_vcpu, u64 *reg) |
95 | { |
96 | struct kvm_s390_irq irq = { |
97 | .type = KVM_S390_INT_EXTERNAL_CALL, |
98 | .u.extcall.code = vcpu->vcpu_id, |
99 | }; |
100 | int rc; |
101 | |
102 | rc = kvm_s390_inject_vcpu(vcpu: dst_vcpu, irq: &irq); |
103 | if (rc == -EBUSY) { |
104 | *reg &= 0xffffffff00000000UL; |
105 | *reg |= SIGP_STATUS_EXT_CALL_PENDING; |
106 | return SIGP_CC_STATUS_STORED; |
107 | } else if (rc == 0) { |
108 | VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x" , |
109 | dst_vcpu->vcpu_id); |
110 | } |
111 | |
112 | return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED; |
113 | } |
114 | |
115 | static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) |
116 | { |
117 | struct kvm_s390_irq irq = { |
118 | .type = KVM_S390_SIGP_STOP, |
119 | }; |
120 | int rc; |
121 | |
122 | rc = kvm_s390_inject_vcpu(vcpu: dst_vcpu, irq: &irq); |
123 | if (rc == -EBUSY) |
124 | rc = SIGP_CC_BUSY; |
125 | else if (rc == 0) |
126 | VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x" , |
127 | dst_vcpu->vcpu_id); |
128 | |
129 | return rc; |
130 | } |
131 | |
132 | static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu, |
133 | struct kvm_vcpu *dst_vcpu, u64 *reg) |
134 | { |
135 | struct kvm_s390_irq irq = { |
136 | .type = KVM_S390_SIGP_STOP, |
137 | .u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS, |
138 | }; |
139 | int rc; |
140 | |
141 | rc = kvm_s390_inject_vcpu(vcpu: dst_vcpu, irq: &irq); |
142 | if (rc == -EBUSY) |
143 | rc = SIGP_CC_BUSY; |
144 | else if (rc == 0) |
145 | VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x" , |
146 | dst_vcpu->vcpu_id); |
147 | |
148 | return rc; |
149 | } |
150 | |
151 | static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter, |
152 | u64 *status_reg) |
153 | { |
154 | *status_reg &= 0xffffffff00000000UL; |
155 | |
156 | /* Reject set arch order, with czam we're always in z/Arch mode. */ |
157 | *status_reg |= SIGP_STATUS_INVALID_PARAMETER; |
158 | return SIGP_CC_STATUS_STORED; |
159 | } |
160 | |
161 | static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, |
162 | u32 address, u64 *reg) |
163 | { |
164 | struct kvm_s390_irq irq = { |
165 | .type = KVM_S390_SIGP_SET_PREFIX, |
166 | .u.prefix.address = address & 0x7fffe000u, |
167 | }; |
168 | int rc; |
169 | |
170 | /* |
171 | * Make sure the new value is valid memory. We only need to check the |
172 | * first page, since address is 8k aligned and memory pieces are always |
173 | * at least 1MB aligned and have at least a size of 1MB. |
174 | */ |
175 | if (!kvm_is_gpa_in_memslot(kvm: vcpu->kvm, gpa: irq.u.prefix.address)) { |
176 | *reg &= 0xffffffff00000000UL; |
177 | *reg |= SIGP_STATUS_INVALID_PARAMETER; |
178 | return SIGP_CC_STATUS_STORED; |
179 | } |
180 | |
181 | rc = kvm_s390_inject_vcpu(vcpu: dst_vcpu, irq: &irq); |
182 | if (rc == -EBUSY) { |
183 | *reg &= 0xffffffff00000000UL; |
184 | *reg |= SIGP_STATUS_INCORRECT_STATE; |
185 | return SIGP_CC_STATUS_STORED; |
186 | } |
187 | |
188 | return rc; |
189 | } |
190 | |
191 | static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, |
192 | struct kvm_vcpu *dst_vcpu, |
193 | u32 addr, u64 *reg) |
194 | { |
195 | int rc; |
196 | |
197 | if (!kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_STOPPED)) { |
198 | *reg &= 0xffffffff00000000UL; |
199 | *reg |= SIGP_STATUS_INCORRECT_STATE; |
200 | return SIGP_CC_STATUS_STORED; |
201 | } |
202 | |
203 | addr &= 0x7ffffe00; |
204 | rc = kvm_s390_store_status_unloaded(vcpu: dst_vcpu, addr); |
205 | if (rc == -EFAULT) { |
206 | *reg &= 0xffffffff00000000UL; |
207 | *reg |= SIGP_STATUS_INVALID_PARAMETER; |
208 | rc = SIGP_CC_STATUS_STORED; |
209 | } |
210 | return rc; |
211 | } |
212 | |
213 | static int __sigp_sense_running(struct kvm_vcpu *vcpu, |
214 | struct kvm_vcpu *dst_vcpu, u64 *reg) |
215 | { |
216 | int rc; |
217 | |
218 | if (!test_kvm_facility(kvm: vcpu->kvm, nr: 9)) { |
219 | *reg &= 0xffffffff00000000UL; |
220 | *reg |= SIGP_STATUS_INVALID_ORDER; |
221 | return SIGP_CC_STATUS_STORED; |
222 | } |
223 | |
224 | if (kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_RUNNING)) { |
225 | /* running */ |
226 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
227 | } else { |
228 | /* not running */ |
229 | *reg &= 0xffffffff00000000UL; |
230 | *reg |= SIGP_STATUS_NOT_RUNNING; |
231 | rc = SIGP_CC_STATUS_STORED; |
232 | } |
233 | |
234 | VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x" , |
235 | dst_vcpu->vcpu_id, rc); |
236 | |
237 | return rc; |
238 | } |
239 | |
240 | static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu, |
241 | struct kvm_vcpu *dst_vcpu, u8 order_code) |
242 | { |
243 | struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int; |
244 | /* handle (RE)START in user space */ |
245 | int rc = -EOPNOTSUPP; |
246 | |
247 | /* make sure we don't race with STOP irq injection */ |
248 | spin_lock(lock: &li->lock); |
249 | if (kvm_s390_is_stop_irq_pending(dst_vcpu)) |
250 | rc = SIGP_CC_BUSY; |
251 | spin_unlock(lock: &li->lock); |
252 | |
253 | return rc; |
254 | } |
255 | |
256 | static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu, |
257 | struct kvm_vcpu *dst_vcpu, u8 order_code) |
258 | { |
259 | /* handle (INITIAL) CPU RESET in user space */ |
260 | return -EOPNOTSUPP; |
261 | } |
262 | |
263 | static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu, |
264 | struct kvm_vcpu *dst_vcpu) |
265 | { |
266 | /* handle unknown orders in user space */ |
267 | return -EOPNOTSUPP; |
268 | } |
269 | |
270 | static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, |
271 | u16 cpu_addr, u32 parameter, u64 *status_reg) |
272 | { |
273 | int rc; |
274 | struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(kvm: vcpu->kvm, id: cpu_addr); |
275 | |
276 | if (!dst_vcpu) |
277 | return SIGP_CC_NOT_OPERATIONAL; |
278 | |
279 | /* |
280 | * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders |
281 | * are processed asynchronously. Until the affected VCPU finishes |
282 | * its work and calls back into KVM to clear the (RESTART or STOP) |
283 | * interrupt, we need to return any new non-reset orders "busy". |
284 | * |
285 | * This is important because a single VCPU could issue: |
286 | * 1) SIGP STOP $DESTINATION |
287 | * 2) SIGP SENSE $DESTINATION |
288 | * |
289 | * If the SIGP SENSE would not be rejected as "busy", it could |
290 | * return an incorrect answer as to whether the VCPU is STOPPED |
291 | * or OPERATING. |
292 | */ |
293 | if (order_code != SIGP_INITIAL_CPU_RESET && |
294 | order_code != SIGP_CPU_RESET) { |
295 | /* |
296 | * Lockless check. Both SIGP STOP and SIGP (RE)START |
297 | * properly synchronize everything while processing |
298 | * their orders, while the guest cannot observe a |
299 | * difference when issuing other orders from two |
300 | * different VCPUs. |
301 | */ |
302 | if (kvm_s390_is_stop_irq_pending(dst_vcpu) || |
303 | kvm_s390_is_restart_irq_pending(dst_vcpu)) |
304 | return SIGP_CC_BUSY; |
305 | } |
306 | |
307 | switch (order_code) { |
308 | case SIGP_SENSE: |
309 | vcpu->stat.instruction_sigp_sense++; |
310 | rc = __sigp_sense(vcpu, dst_vcpu, reg: status_reg); |
311 | break; |
312 | case SIGP_EXTERNAL_CALL: |
313 | vcpu->stat.instruction_sigp_external_call++; |
314 | rc = __sigp_external_call(vcpu, dst_vcpu, reg: status_reg); |
315 | break; |
316 | case SIGP_EMERGENCY_SIGNAL: |
317 | vcpu->stat.instruction_sigp_emergency++; |
318 | rc = __sigp_emergency(vcpu, dst_vcpu); |
319 | break; |
320 | case SIGP_STOP: |
321 | vcpu->stat.instruction_sigp_stop++; |
322 | rc = __sigp_stop(vcpu, dst_vcpu); |
323 | break; |
324 | case SIGP_STOP_AND_STORE_STATUS: |
325 | vcpu->stat.instruction_sigp_stop_store_status++; |
326 | rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, reg: status_reg); |
327 | break; |
328 | case SIGP_STORE_STATUS_AT_ADDRESS: |
329 | vcpu->stat.instruction_sigp_store_status++; |
330 | rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, addr: parameter, |
331 | reg: status_reg); |
332 | break; |
333 | case SIGP_SET_PREFIX: |
334 | vcpu->stat.instruction_sigp_prefix++; |
335 | rc = __sigp_set_prefix(vcpu, dst_vcpu, address: parameter, reg: status_reg); |
336 | break; |
337 | case SIGP_COND_EMERGENCY_SIGNAL: |
338 | vcpu->stat.instruction_sigp_cond_emergency++; |
339 | rc = __sigp_conditional_emergency(vcpu, dst_vcpu, asn: parameter, |
340 | reg: status_reg); |
341 | break; |
342 | case SIGP_SENSE_RUNNING: |
343 | vcpu->stat.instruction_sigp_sense_running++; |
344 | rc = __sigp_sense_running(vcpu, dst_vcpu, reg: status_reg); |
345 | break; |
346 | case SIGP_START: |
347 | vcpu->stat.instruction_sigp_start++; |
348 | rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code); |
349 | break; |
350 | case SIGP_RESTART: |
351 | vcpu->stat.instruction_sigp_restart++; |
352 | rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code); |
353 | break; |
354 | case SIGP_INITIAL_CPU_RESET: |
355 | vcpu->stat.instruction_sigp_init_cpu_reset++; |
356 | rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code); |
357 | break; |
358 | case SIGP_CPU_RESET: |
359 | vcpu->stat.instruction_sigp_cpu_reset++; |
360 | rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code); |
361 | break; |
362 | default: |
363 | vcpu->stat.instruction_sigp_unknown++; |
364 | rc = __prepare_sigp_unknown(vcpu, dst_vcpu); |
365 | } |
366 | |
367 | if (rc == -EOPNOTSUPP) |
368 | VCPU_EVENT(vcpu, 4, |
369 | "sigp order %u -> cpu %x: handled in user space" , |
370 | order_code, dst_vcpu->vcpu_id); |
371 | |
372 | return rc; |
373 | } |
374 | |
375 | static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code, |
376 | u16 cpu_addr) |
377 | { |
378 | if (!vcpu->kvm->arch.user_sigp) |
379 | return 0; |
380 | |
381 | switch (order_code) { |
382 | case SIGP_SENSE: |
383 | case SIGP_EXTERNAL_CALL: |
384 | case SIGP_EMERGENCY_SIGNAL: |
385 | case SIGP_COND_EMERGENCY_SIGNAL: |
386 | case SIGP_SENSE_RUNNING: |
387 | return 0; |
388 | /* update counters as we're directly dropping to user space */ |
389 | case SIGP_STOP: |
390 | vcpu->stat.instruction_sigp_stop++; |
391 | break; |
392 | case SIGP_STOP_AND_STORE_STATUS: |
393 | vcpu->stat.instruction_sigp_stop_store_status++; |
394 | break; |
395 | case SIGP_STORE_STATUS_AT_ADDRESS: |
396 | vcpu->stat.instruction_sigp_store_status++; |
397 | break; |
398 | case SIGP_STORE_ADDITIONAL_STATUS: |
399 | vcpu->stat.instruction_sigp_store_adtl_status++; |
400 | break; |
401 | case SIGP_SET_PREFIX: |
402 | vcpu->stat.instruction_sigp_prefix++; |
403 | break; |
404 | case SIGP_START: |
405 | vcpu->stat.instruction_sigp_start++; |
406 | break; |
407 | case SIGP_RESTART: |
408 | vcpu->stat.instruction_sigp_restart++; |
409 | break; |
410 | case SIGP_INITIAL_CPU_RESET: |
411 | vcpu->stat.instruction_sigp_init_cpu_reset++; |
412 | break; |
413 | case SIGP_CPU_RESET: |
414 | vcpu->stat.instruction_sigp_cpu_reset++; |
415 | break; |
416 | default: |
417 | vcpu->stat.instruction_sigp_unknown++; |
418 | } |
419 | VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace" , |
420 | order_code, cpu_addr); |
421 | |
422 | return 1; |
423 | } |
424 | |
425 | int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) |
426 | { |
427 | int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; |
428 | int r3 = vcpu->arch.sie_block->ipa & 0x000f; |
429 | u32 parameter; |
430 | u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; |
431 | u8 order_code; |
432 | int rc; |
433 | |
434 | /* sigp in userspace can exit */ |
435 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
436 | return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); |
437 | |
438 | order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); |
439 | if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr)) |
440 | return -EOPNOTSUPP; |
441 | |
442 | if (r1 % 2) |
443 | parameter = vcpu->run->s.regs.gprs[r1]; |
444 | else |
445 | parameter = vcpu->run->s.regs.gprs[r1 + 1]; |
446 | |
447 | trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter); |
448 | switch (order_code) { |
449 | case SIGP_SET_ARCHITECTURE: |
450 | vcpu->stat.instruction_sigp_arch++; |
451 | rc = __sigp_set_arch(vcpu, parameter, |
452 | status_reg: &vcpu->run->s.regs.gprs[r1]); |
453 | break; |
454 | default: |
455 | rc = handle_sigp_dst(vcpu, order_code, cpu_addr, |
456 | parameter, |
457 | status_reg: &vcpu->run->s.regs.gprs[r1]); |
458 | } |
459 | |
460 | if (rc < 0) |
461 | return rc; |
462 | |
463 | kvm_s390_set_psw_cc(vcpu, cc: rc); |
464 | return 0; |
465 | } |
466 | |
467 | /* |
468 | * Handle SIGP partial execution interception. |
469 | * |
470 | * This interception will occur at the source cpu when a source cpu sends an |
471 | * external call to a target cpu and the target cpu has the WAIT bit set in |
472 | * its cpuflags. Interception will occur after the interrupt indicator bits at |
473 | * the target cpu have been set. All error cases will lead to instruction |
474 | * interception, therefore nothing is to be checked or prepared. |
475 | */ |
476 | int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu) |
477 | { |
478 | int r3 = vcpu->arch.sie_block->ipa & 0x000f; |
479 | u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; |
480 | struct kvm_vcpu *dest_vcpu; |
481 | u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); |
482 | |
483 | if (order_code == SIGP_EXTERNAL_CALL) { |
484 | trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); |
485 | |
486 | dest_vcpu = kvm_get_vcpu_by_id(kvm: vcpu->kvm, id: cpu_addr); |
487 | BUG_ON(dest_vcpu == NULL); |
488 | |
489 | kvm_s390_vcpu_wakeup(vcpu: dest_vcpu); |
490 | kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED); |
491 | return 0; |
492 | } |
493 | |
494 | return -EOPNOTSUPP; |
495 | } |
496 | |