/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

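/*
 * A note on the helpers below: SIGP orders complete with one of four
 * condition codes, which the handlers return directly: order accepted
 * (SIGP_CC_ORDER_CODE_ACCEPTED, 0), status stored (SIGP_CC_STATUS_STORED,
 * 1), busy (SIGP_CC_BUSY, 2) or not operational (SIGP_CC_NOT_OPERATIONAL,
 * 3). Whenever status is stored, bits 32-63 of the register designated by
 * r1 are cleared and the relevant SIGP_STATUS_* bits are set there.
 */
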
static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	int cpuflags;
	int rc;
	int ext_call_pending;

	li = &dst_vcpu->arch.local_int;

	cpuflags = atomic_read(li->cpuflags);
	ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
	if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending)
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
		*reg &= 0xffffffff00000000UL;
		if (ext_call_pending)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (cpuflags & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
		   rc);
	return rc;
}

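/*
 * The emergency-signal external interrupt carries the address of the
 * signalling CPU, which is why vcpu->vcpu_id is passed along as the
 * interrupt code.
 */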
static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EMERGENCY,
		.u.emerg.code = vcpu->vcpu_id,
	};
	int rc = 0;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (!rc)
		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	return __inject_sigp_emergency(vcpu, dst_vcpu);
}

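/*
 * CONDITIONAL EMERGENCY SIGNAL: whether the signal is actually injected
 * is decided from the target's PSW, its primary/secondary ASN (taken from
 * CR4/CR3) and its idle state; if the checks below fail, "incorrect
 * state" status is stored instead of delivering the signal.
 */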
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu,
					u16 asn, u64 *reg)
{
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	bool idle;

	idle = is_vcpu_idle(vcpu);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Inject the emergency signal? */
	if (!is_vcpu_stopped(vcpu)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || (idle && psw->addr != 0)
	    || (!idle && (asn == p_asn || asn == s_asn))) {
		return __inject_sigp_emergency(vcpu, dst_vcpu);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

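/*
 * Unlike the emergency signal, only one external call can be pending per
 * CPU at a time; -EBUSY from the injection is therefore reported to the
 * guest as "external call pending" status.
 */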
static int __sigp_external_call(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_EXTERNAL_CALL,
		.u.extcall.code = vcpu->vcpu_id,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		return SIGP_CC_STATUS_STORED;
	} else if (rc == 0) {
		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
			   dst_vcpu->vcpu_id);
	}

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

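/*
 * STOP is injected as a local interrupt, so the target stops itself when
 * it processes the interrupt; a second STOP while one is still pending
 * is answered with the busy condition code.
 */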
static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_STOP,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY)
		rc = SIGP_CC_BUSY;
	else if (rc == 0)
		VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc;
}

static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_STOP,
		.u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
	};
	int rc;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY)
		rc = SIGP_CC_BUSY;
	else if (rc == 0)
		VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
			   dst_vcpu->vcpu_id);

	return rc;
}

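/*
 * With the czam facility the guest is always in z/Architecture mode, so
 * SET ARCHITECTURE can never succeed; only the status code to store
 * (invalid parameter vs. incorrect state) depends on whether all other
 * CPUs are stopped.
 */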
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter,
			   u64 *status_reg)
{
	unsigned int i;
	struct kvm_vcpu *v;
	bool all_stopped = true;

	kvm_for_each_vcpu(i, v, vcpu->kvm) {
		if (v == vcpu)
			continue;
		if (!is_vcpu_stopped(v))
			all_stopped = false;
	}

	*status_reg &= 0xffffffff00000000UL;

	/* Reject set arch order, with czam we're always in z/Arch mode. */
	*status_reg |= (all_stopped ? SIGP_STATUS_INVALID_PARAMETER :
					SIGP_STATUS_INCORRECT_STATE);
	return SIGP_CC_STATUS_STORED;
}

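/*
 * SET PREFIX: the new prefix is 8k aligned (hence the 0x7fffe000 mask)
 * and must reference accessible guest memory. The value is applied by
 * the target itself via the injected interrupt.
 */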
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
			     u32 address, u64 *reg)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_SIGP_SET_PREFIX,
		.u.prefix.address = address & 0x7fffe000u,
	};
	int rc;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
	if (rc == -EBUSY) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	return rc;
}

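/*
 * STORE STATUS AT ADDRESS requires a stopped target; the address is
 * aligned down to a 512-byte boundary (the 0x7ffffe00 mask) before the
 * status is written.
 */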
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
				       struct kvm_vcpu *dst_vcpu,
				       u32 addr, u64 *reg)
{
	int flags;
	int rc;

	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

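/*
 * SENSE RUNNING STATUS is only valid if the sense-running-status
 * facility (facility bit 9) is provided to the guest; otherwise
 * "invalid order" status is stored.
 */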
static int __sigp_sense_running(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *dst_vcpu, u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	int rc;

	if (!test_kvm_facility(vcpu->kvm, 9)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_ORDER;
		return SIGP_CC_STATUS_STORED;
	}

	li = &dst_vcpu->arch.local_int;
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
		/* running */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
		/* not running */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_NOT_RUNNING;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
		   dst_vcpu->vcpu_id, rc);

	return rc;
}

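/*
 * START and RESTART are completed in user space; the kernel only checks,
 * under the local-interrupt lock, that no STOP interrupt is still
 * pending, which would make the order busy.
 */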
static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	/* handle (RE)START in user space */
	int rc = -EOPNOTSUPP;

	/* make sure we don't race with STOP irq injection */
	spin_lock(&li->lock);
	if (kvm_s390_is_stop_irq_pending(dst_vcpu))
		rc = SIGP_CC_BUSY;
	spin_unlock(&li->lock);

	return rc;
}

static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
				    struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	/* handle (INITIAL) CPU RESET in user space */
	return -EOPNOTSUPP;
}

static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
				  struct kvm_vcpu *dst_vcpu)
{
	/* handle unknown orders in user space */
	return -EOPNOTSUPP;
}

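/*
 * Dispatch an order that addresses a specific CPU. A nonexistent CPU
 * address yields CC 3 (not operational); orders that have to be
 * completed in user space return -EOPNOTSUPP to trigger an exit.
 */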
static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
			   u16 cpu_addr, u32 parameter, u64 *status_reg)
{
	int rc;
	struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);

	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, dst_vcpu);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, dst_vcpu);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
						 status_reg);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_cond_emergency++;
		rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
						  status_reg);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
		rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
	}

	if (rc == -EOPNOTSUPP)
		VCPU_EVENT(vcpu, 4,
			   "sigp order %u -> cpu %x: handled in user space",
			   order_code, dst_vcpu->vcpu_id);

	return rc;
}

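/*
 * When userspace has enabled the KVM_CAP_S390_USER_SIGP capability, all
 * orders except the ones handled entirely in the kernel (SENSE, EXTERNAL
 * CALL, EMERGENCY SIGNAL, CONDITIONAL EMERGENCY SIGNAL and SENSE RUNNING)
 * are forwarded. Returns 1 if the order is to be handled in user space,
 * 0 otherwise.
 */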
static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code,
					   u16 cpu_addr)
{
	if (!vcpu->kvm->arch.user_sigp)
		return 0;

	switch (order_code) {
	case SIGP_SENSE:
	case SIGP_EXTERNAL_CALL:
	case SIGP_EMERGENCY_SIGNAL:
	case SIGP_COND_EMERGENCY_SIGNAL:
	case SIGP_SENSE_RUNNING:
		return 0;
	/* update counters as we're directly dropping to user space */
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		break;
	case SIGP_STORE_ADDITIONAL_STATUS:
		vcpu->stat.instruction_sigp_store_adtl_status++;
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
	}
	VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace",
		   order_code, cpu_addr);

	return 1;
}

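/*
 * SIGP is an RS-format instruction: r1 and r3 are decoded from the IPA,
 * the order code from base + displacement. The CPU address is taken from
 * gpr r3 and the parameter from the odd register of the r1 pair; status,
 * if any, is stored back into gpr r1.
 */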
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* SIGP is privileged; in the problem state it must raise an exception */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
	if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr))
		return -EOPNOTSUPP;

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter,
				     &vcpu->run->s.regs.gprs[r1]);
		break;
	default:
		rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
				     parameter,
				     &vcpu->run->s.regs.gprs[r1]);
	}

	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

/*
 * Handle SIGP partial execution interception.
 *
 * This interception occurs on the source cpu when it sends an external
 * call to a target cpu that has the WAIT bit set in its cpuflags. The
 * interception occurs after the interrupt indicator bits at the target
 * cpu have been set. All error cases lead to instruction interception,
 * so nothing needs to be checked or prepared here.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	struct kvm_vcpu *dest_vcpu;
	u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);

	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

	if (order_code == SIGP_EXTERNAL_CALL) {
		dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
		BUG_ON(dest_vcpu == NULL);

		kvm_s390_vcpu_wakeup(dest_vcpu);
		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
		return 0;
	}

	return -EOPNOTSUPP;
}