/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};
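
/*
 * Each entry above is exported by common KVM code as its own file in
 * kvm's debugfs directory (typically /sys/kernel/debug/kvm), so exit
 * and instruction-intercept rates can be watched per counter from the
 * host.
 */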

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
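
/*
 * Illustrative userspace usage (sketch, not part of this file): enable
 * SIE for the current process before creating a VM:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	if (kvm_fd >= 0)
 *		ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0);
 */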

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
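
/*
 * Illustrative userspace usage (sketch): inject a floating interrupt,
 * e.g. a service signal, through the VM ioctl above; vm_fd and
 * sccb_addr are assumed to come from the caller's setup:
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_INT_SERVICE,
 *		.parm = sccb_addr,
 *	};
 *	ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
 */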

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

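	/*
	 * The system control area (SCA) is one zeroed page shared by all
	 * vcpus of this VM; each vcpu's SIE block is linked into it in
	 * kvm_arch_vcpu_create() below.
	 */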
	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}
	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial CPU reset in the PoP, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
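	/*
	 * Architected initial values (cf. the initial CPU reset in the
	 * Principles of Operation): CR0 keeps the interrupt-key and
	 * measurement-alert external submasks set (plus reserved bit 56),
	 * CR14 the channel-report submask (plus reserved bits 32 and 33).
	 */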
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

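/*
 * Run the vcpu once: handle pending machine checks, deliver pending
 * guest interrupts, enter SIE via sie64a() and, if the SIE instruction
 * itself faults, either report that to a ucontrol userspace or
 * translate it into an addressing exception for the guest.
 */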
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}

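/*
 * Top-level KVM_RUN handler: sync the register state marked dirty by
 * userspace into the SIE block, loop in __vcpu_run() until a signal
 * arrives or an intercept needs userspace, then copy PSW, prefix and
 * control registers back into kvm_run.
 */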
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

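/*
 * Illustrative userspace usage (sketch) of the store-status handler
 * above, storing the architected save area at the absolute lowcore
 * location; vcpu_fd is assumed to be a KVM_CREATE_VCPU fd:
 *
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_NOADDR);
 */
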
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

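/*
 * For user controlled VMs, userspace may mmap() the hardware SIE
 * control block at KVM_S390_SIE_PAGE_OFFSET on the vcpu fd; this
 * fault handler backs that mapping.
 */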
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot, which has
	   to start at guest physical zero and which has to be located at a
	   segment (1 MB) boundary in userland and which has to end at a
	   segment boundary. The memory in userland may be fragmented into
	   various different vmas. It is okay to mmap() and munmap() stuff
	   in this slot after doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
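
/*
 * Illustrative userspace setup (sketch) satisfying the checks above:
 * a single slot starting at guest physical zero; backing and mem_size
 * are assumptions of the caller and must both be 1 MB aligned:
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.userspace_addr = (unsigned long) backing,
 *		.memory_size = mem_size,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */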

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int rc;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);