Lines Matching full:vcpu
34 static int handle_ri(struct kvm_vcpu *vcpu) in handle_ri() argument
36 vcpu->stat.instruction_ri++; in handle_ri()
38 if (test_kvm_facility(vcpu->kvm, 64)) { in handle_ri()
39 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)"); in handle_ri()
40 vcpu->arch.sie_block->ecb3 |= ECB3_RI; in handle_ri()
41 kvm_s390_retry_instr(vcpu); in handle_ri()
44 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); in handle_ri()
47 int kvm_s390_handle_aa(struct kvm_vcpu *vcpu) in kvm_s390_handle_aa() argument
49 if ((vcpu->arch.sie_block->ipa & 0xf) <= 4) in kvm_s390_handle_aa()
50 return handle_ri(vcpu); in kvm_s390_handle_aa()
55 static int handle_gs(struct kvm_vcpu *vcpu) in handle_gs() argument
57 vcpu->stat.instruction_gs++; in handle_gs()
59 if (test_kvm_facility(vcpu->kvm, 133)) { in handle_gs()
60 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)"); in handle_gs()
63 current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb; in handle_gs()
66 vcpu->arch.sie_block->ecb |= ECB_GS; in handle_gs()
67 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in handle_gs()
68 vcpu->arch.gs_enabled = 1; in handle_gs()
69 kvm_s390_retry_instr(vcpu); in handle_gs()
72 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); in handle_gs()
75 int kvm_s390_handle_e3(struct kvm_vcpu *vcpu) in kvm_s390_handle_e3() argument
77 int code = vcpu->arch.sie_block->ipb & 0xff; in kvm_s390_handle_e3()
80 return handle_gs(vcpu); in kvm_s390_handle_e3()
85 static int handle_set_clock(struct kvm_vcpu *vcpu) in handle_set_clock() argument
92 vcpu->stat.instruction_sck++; in handle_set_clock()
94 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_set_clock()
95 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_set_clock()
97 op2 = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_set_clock()
99 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_set_clock()
100 rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod)); in handle_set_clock()
102 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_set_clock()
104 VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod); in handle_set_clock()
105 kvm_s390_set_tod_clock(vcpu->kvm, &gtod); in handle_set_clock()
107 kvm_s390_set_psw_cc(vcpu, 0); in handle_set_clock()
111 static int handle_set_prefix(struct kvm_vcpu *vcpu) in handle_set_prefix() argument
118 vcpu->stat.instruction_spx++; in handle_set_prefix()
120 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_set_prefix()
121 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_set_prefix()
123 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_set_prefix()
127 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_set_prefix()
130 rc = read_guest(vcpu, operand2, ar, &address, sizeof(address)); in handle_set_prefix()
132 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_set_prefix()
141 if (kvm_is_error_gpa(vcpu->kvm, address)) in handle_set_prefix()
142 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_set_prefix()
144 kvm_s390_set_prefix(vcpu, address); in handle_set_prefix()
145 trace_kvm_s390_handle_prefix(vcpu, 1, address); in handle_set_prefix()
149 static int handle_store_prefix(struct kvm_vcpu *vcpu) in handle_store_prefix() argument
156 vcpu->stat.instruction_stpx++; in handle_store_prefix()
158 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_store_prefix()
159 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_store_prefix()
161 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_store_prefix()
165 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_store_prefix()
167 address = kvm_s390_get_prefix(vcpu); in handle_store_prefix()
170 rc = write_guest(vcpu, operand2, ar, &address, sizeof(address)); in handle_store_prefix()
172 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_store_prefix()
174 VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2); in handle_store_prefix()
175 trace_kvm_s390_handle_prefix(vcpu, 0, address); in handle_store_prefix()
179 static int handle_store_cpu_address(struct kvm_vcpu *vcpu) in handle_store_cpu_address() argument
181 u16 vcpu_id = vcpu->vcpu_id; in handle_store_cpu_address()
186 vcpu->stat.instruction_stap++; in handle_store_cpu_address()
188 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_store_cpu_address()
189 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_store_cpu_address()
191 ga = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_store_cpu_address()
194 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_store_cpu_address()
196 rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id)); in handle_store_cpu_address()
198 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_store_cpu_address()
200 VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga); in handle_store_cpu_address()
201 trace_kvm_s390_handle_stap(vcpu, ga); in handle_store_cpu_address()
205 int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu) in kvm_s390_skey_check_enable() argument
209 trace_kvm_s390_skey_related_inst(vcpu); in kvm_s390_skey_check_enable()
211 if (vcpu->arch.skey_enabled) in kvm_s390_skey_check_enable()
215 VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc); in kvm_s390_skey_check_enable()
219 if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS)) in kvm_s390_skey_check_enable()
220 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS); in kvm_s390_skey_check_enable()
221 if (!vcpu->kvm->arch.use_skf) in kvm_s390_skey_check_enable()
222 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; in kvm_s390_skey_check_enable()
224 vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); in kvm_s390_skey_check_enable()
225 vcpu->arch.skey_enabled = true; in kvm_s390_skey_check_enable()
229 static int try_handle_skey(struct kvm_vcpu *vcpu) in try_handle_skey() argument
233 rc = kvm_s390_skey_check_enable(vcpu); in try_handle_skey()
236 if (vcpu->kvm->arch.use_skf) { in try_handle_skey()
238 kvm_s390_retry_instr(vcpu); in try_handle_skey()
239 VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); in try_handle_skey()
245 static int handle_iske(struct kvm_vcpu *vcpu) in handle_iske() argument
253 vcpu->stat.instruction_iske++; in handle_iske()
255 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_iske()
256 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_iske()
258 rc = try_handle_skey(vcpu); in handle_iske()
262 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); in handle_iske()
264 gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; in handle_iske()
265 gaddr = kvm_s390_logical_to_effective(vcpu, gaddr); in handle_iske()
266 gaddr = kvm_s390_real_to_abs(vcpu, gaddr); in handle_iske()
267 vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr)); in handle_iske()
269 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_iske()
285 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_iske()
288 vcpu->run->s.regs.gprs[reg1] &= ~0xff; in handle_iske()
289 vcpu->run->s.regs.gprs[reg1] |= key; in handle_iske()
293 static int handle_rrbe(struct kvm_vcpu *vcpu) in handle_rrbe() argument
300 vcpu->stat.instruction_rrbe++; in handle_rrbe()
302 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_rrbe()
303 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_rrbe()
305 rc = try_handle_skey(vcpu); in handle_rrbe()
309 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); in handle_rrbe()
311 gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; in handle_rrbe()
312 gaddr = kvm_s390_logical_to_effective(vcpu, gaddr); in handle_rrbe()
313 gaddr = kvm_s390_real_to_abs(vcpu, gaddr); in handle_rrbe()
314 vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr)); in handle_rrbe()
316 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_rrbe()
331 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_rrbe()
334 kvm_s390_set_psw_cc(vcpu, rc); in handle_rrbe()
342 static int handle_sske(struct kvm_vcpu *vcpu) in handle_sske() argument
344 unsigned char m3 = vcpu->arch.sie_block->ipb >> 28; in handle_sske()
351 vcpu->stat.instruction_sske++; in handle_sske()
353 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_sske()
354 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_sske()
356 rc = try_handle_skey(vcpu); in handle_sske()
360 if (!test_kvm_facility(vcpu->kvm, 8)) in handle_sske()
362 if (!test_kvm_facility(vcpu->kvm, 10)) in handle_sske()
364 if (!test_kvm_facility(vcpu->kvm, 14)) in handle_sske()
367 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); in handle_sske()
369 key = vcpu->run->s.regs.gprs[reg1] & 0xfe; in handle_sske()
370 start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; in handle_sske()
371 start = kvm_s390_logical_to_effective(vcpu, start); in handle_sske()
376 start = kvm_s390_real_to_abs(vcpu, start); in handle_sske()
381 unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start)); in handle_sske()
385 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_sske()
399 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_sske()
408 kvm_s390_set_psw_cc(vcpu, 3); in handle_sske()
410 kvm_s390_set_psw_cc(vcpu, rc); in handle_sske()
411 vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL; in handle_sske()
412 vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8; in handle_sske()
416 if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) in handle_sske()
417 vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK; in handle_sske()
419 vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL; in handle_sske()
420 end = kvm_s390_logical_to_effective(vcpu, end); in handle_sske()
421 vcpu->run->s.regs.gprs[reg2] |= end; in handle_sske()
426 static int handle_ipte_interlock(struct kvm_vcpu *vcpu) in handle_ipte_interlock() argument
428 vcpu->stat.instruction_ipte_interlock++; in handle_ipte_interlock()
429 if (psw_bits(vcpu->arch.sie_block->gpsw).pstate) in handle_ipte_interlock()
430 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_ipte_interlock()
431 wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu)); in handle_ipte_interlock()
432 kvm_s390_retry_instr(vcpu); in handle_ipte_interlock()
433 VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation"); in handle_ipte_interlock()
437 static int handle_test_block(struct kvm_vcpu *vcpu) in handle_test_block() argument
442 vcpu->stat.instruction_tb++; in handle_test_block()
444 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_test_block()
445 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_test_block()
447 kvm_s390_get_regs_rre(vcpu, NULL, &reg2); in handle_test_block()
448 addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; in handle_test_block()
449 addr = kvm_s390_logical_to_effective(vcpu, addr); in handle_test_block()
450 if (kvm_s390_check_low_addr_prot_real(vcpu, addr)) in handle_test_block()
451 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in handle_test_block()
452 addr = kvm_s390_real_to_abs(vcpu, addr); in handle_test_block()
454 if (kvm_is_error_gpa(vcpu->kvm, addr)) in handle_test_block()
455 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_test_block()
460 if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE)) in handle_test_block()
462 kvm_s390_set_psw_cc(vcpu, 0); in handle_test_block()
463 vcpu->run->s.regs.gprs[0] = 0; in handle_test_block()
467 static int handle_tpi(struct kvm_vcpu *vcpu) in handle_tpi() argument
476 vcpu->stat.instruction_tpi++; in handle_tpi()
478 addr = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_tpi()
480 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_tpi()
482 inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0); in handle_tpi()
484 kvm_s390_set_psw_cc(vcpu, 0); in handle_tpi()
497 rc = write_guest(vcpu, addr, ar, &tpi_data, len); in handle_tpi()
499 rc = kvm_s390_inject_prog_cond(vcpu, rc); in handle_tpi()
508 if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) { in handle_tpi()
517 kvm_s390_set_psw_cc(vcpu, 1); in handle_tpi()
525 if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) { in handle_tpi()
533 static int handle_tsch(struct kvm_vcpu *vcpu) in handle_tsch() argument
538 vcpu->stat.instruction_tsch++; in handle_tsch()
541 if (vcpu->run->s.regs.gprs[1]) in handle_tsch()
542 inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask, in handle_tsch()
543 vcpu->run->s.regs.gprs[1]); in handle_tsch()
553 vcpu->run->exit_reason = KVM_EXIT_S390_TSCH; in handle_tsch()
554 vcpu->run->s390_tsch.dequeued = !!inti; in handle_tsch()
556 vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id; in handle_tsch()
557 vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr; in handle_tsch()
558 vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm; in handle_tsch()
559 vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word; in handle_tsch()
561 vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb; in handle_tsch()
566 static int handle_io_inst(struct kvm_vcpu *vcpu) in handle_io_inst() argument
568 VCPU_EVENT(vcpu, 4, "%s", "I/O instruction"); in handle_io_inst()
570 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_io_inst()
571 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_io_inst()
573 if (vcpu->kvm->arch.css_support) { in handle_io_inst()
578 if (vcpu->arch.sie_block->ipa == 0xb236) in handle_io_inst()
579 return handle_tpi(vcpu); in handle_io_inst()
580 if (vcpu->arch.sie_block->ipa == 0xb235) in handle_io_inst()
581 return handle_tsch(vcpu); in handle_io_inst()
583 vcpu->stat.instruction_io_other++; in handle_io_inst()
590 kvm_s390_set_psw_cc(vcpu, 3); in handle_io_inst()
595 static int handle_stfl(struct kvm_vcpu *vcpu) in handle_stfl() argument
600 vcpu->stat.instruction_stfl++; in handle_stfl()
602 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_stfl()
603 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_stfl()
609 fac = *vcpu->kvm->arch.model.fac_list >> 32; in handle_stfl()
610 rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list), in handle_stfl()
614 VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac); in handle_stfl()
615 trace_kvm_s390_handle_stfl(vcpu, fac); in handle_stfl()
641 int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) in kvm_s390_handle_lpsw() argument
643 psw_t *gpsw = &vcpu->arch.sie_block->gpsw; in kvm_s390_handle_lpsw()
649 vcpu->stat.instruction_lpsw++; in kvm_s390_handle_lpsw()
652 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in kvm_s390_handle_lpsw()
654 addr = kvm_s390_get_base_disp_s(vcpu, &ar); in kvm_s390_handle_lpsw()
656 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in kvm_s390_handle_lpsw()
658 rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); in kvm_s390_handle_lpsw()
660 return kvm_s390_inject_prog_cond(vcpu, rc); in kvm_s390_handle_lpsw()
662 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in kvm_s390_handle_lpsw()
667 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in kvm_s390_handle_lpsw()
671 static int handle_lpswe(struct kvm_vcpu *vcpu) in handle_lpswe() argument
678 vcpu->stat.instruction_lpswe++; in handle_lpswe()
680 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_lpswe()
681 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_lpswe()
683 addr = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_lpswe()
685 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_lpswe()
686 rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); in handle_lpswe()
688 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_lpswe()
689 vcpu->arch.sie_block->gpsw = new_psw; in handle_lpswe()
690 if (!is_valid_psw(&vcpu->arch.sie_block->gpsw)) in handle_lpswe()
691 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_lpswe()
695 static int handle_stidp(struct kvm_vcpu *vcpu) in handle_stidp() argument
697 u64 stidp_data = vcpu->kvm->arch.model.cpuid; in handle_stidp()
702 vcpu->stat.instruction_stidp++; in handle_stidp()
704 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_stidp()
705 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_stidp()
707 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_stidp()
710 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_stidp()
712 rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data)); in handle_stidp()
714 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_stidp()
716 VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data); in handle_stidp()
720 static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) in handle_stsi_3_2_2() argument
725 cpus = atomic_read(&vcpu->kvm->online_vcpus); in handle_stsi_3_2_2()
747 static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar, in insert_stsi_usr_data() argument
750 vcpu->run->exit_reason = KVM_EXIT_S390_STSI; in insert_stsi_usr_data()
751 vcpu->run->s390_stsi.addr = addr; in insert_stsi_usr_data()
752 vcpu->run->s390_stsi.ar = ar; in insert_stsi_usr_data()
753 vcpu->run->s390_stsi.fc = fc; in insert_stsi_usr_data()
754 vcpu->run->s390_stsi.sel1 = sel1; in insert_stsi_usr_data()
755 vcpu->run->s390_stsi.sel2 = sel2; in insert_stsi_usr_data()
758 static int handle_stsi(struct kvm_vcpu *vcpu) in handle_stsi() argument
760 int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; in handle_stsi()
761 int sel1 = vcpu->run->s.regs.gprs[0] & 0xff; in handle_stsi()
762 int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff; in handle_stsi()
768 vcpu->stat.instruction_stsi++; in handle_stsi()
769 VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2); in handle_stsi()
771 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_stsi()
772 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_stsi()
775 kvm_s390_set_psw_cc(vcpu, 3); in handle_stsi()
779 if (vcpu->run->s.regs.gprs[0] & 0x0fffff00 in handle_stsi()
780 || vcpu->run->s.regs.gprs[1] & 0xffff0000) in handle_stsi()
781 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_stsi()
784 vcpu->run->s.regs.gprs[0] = 3 << 28; in handle_stsi()
785 kvm_s390_set_psw_cc(vcpu, 0); in handle_stsi()
789 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_stsi()
792 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_stsi()
809 handle_stsi_3_2_2(vcpu, (void *) mem); in handle_stsi()
813 rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE); in handle_stsi()
815 rc = kvm_s390_inject_prog_cond(vcpu, rc); in handle_stsi()
818 if (vcpu->kvm->arch.user_stsi) { in handle_stsi()
819 insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2); in handle_stsi()
822 trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); in handle_stsi()
824 kvm_s390_set_psw_cc(vcpu, 0); in handle_stsi()
825 vcpu->run->s.regs.gprs[0] = 0; in handle_stsi()
828 kvm_s390_set_psw_cc(vcpu, 3); in handle_stsi()
834 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) in kvm_s390_handle_b2() argument
836 switch (vcpu->arch.sie_block->ipa & 0x00ff) { in kvm_s390_handle_b2()
838 return handle_stidp(vcpu); in kvm_s390_handle_b2()
840 return handle_set_clock(vcpu); in kvm_s390_handle_b2()
842 return handle_set_prefix(vcpu); in kvm_s390_handle_b2()
844 return handle_store_prefix(vcpu); in kvm_s390_handle_b2()
846 return handle_store_cpu_address(vcpu); in kvm_s390_handle_b2()
848 return kvm_s390_handle_vsie(vcpu); in kvm_s390_handle_b2()
851 return handle_ipte_interlock(vcpu); in kvm_s390_handle_b2()
853 return handle_iske(vcpu); in kvm_s390_handle_b2()
855 return handle_rrbe(vcpu); in kvm_s390_handle_b2()
857 return handle_sske(vcpu); in kvm_s390_handle_b2()
859 return handle_test_block(vcpu); in kvm_s390_handle_b2()
876 return handle_io_inst(vcpu); in kvm_s390_handle_b2()
878 return handle_sthyi(vcpu); in kvm_s390_handle_b2()
880 return handle_stsi(vcpu); in kvm_s390_handle_b2()
882 return handle_stfl(vcpu); in kvm_s390_handle_b2()
884 return handle_lpswe(vcpu); in kvm_s390_handle_b2()
890 static int handle_epsw(struct kvm_vcpu *vcpu) in handle_epsw() argument
894 vcpu->stat.instruction_epsw++; in handle_epsw()
896 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); in handle_epsw()
899 vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL; in handle_epsw()
900 vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32; in handle_epsw()
902 vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL; in handle_epsw()
903 vcpu->run->s.regs.gprs[reg2] |= in handle_epsw()
904 vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL; in handle_epsw()
919 static int handle_pfmf(struct kvm_vcpu *vcpu) in handle_pfmf() argument
926 vcpu->stat.instruction_pfmf++; in handle_pfmf()
928 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); in handle_pfmf()
930 if (!test_kvm_facility(vcpu->kvm, 8)) in handle_pfmf()
931 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); in handle_pfmf()
933 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_pfmf()
934 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_pfmf()
936 if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED) in handle_pfmf()
937 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_pfmf()
940 if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && in handle_pfmf()
941 !test_kvm_facility(vcpu->kvm, 14)) in handle_pfmf()
942 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_pfmf()
945 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK && in handle_pfmf()
946 test_kvm_facility(vcpu->kvm, 10)) { in handle_pfmf()
947 mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR; in handle_pfmf()
948 mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC; in handle_pfmf()
951 nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ; in handle_pfmf()
952 key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY; in handle_pfmf()
953 start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; in handle_pfmf()
954 start = kvm_s390_logical_to_effective(vcpu, start); in handle_pfmf()
956 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { in handle_pfmf()
957 if (kvm_s390_check_low_addr_prot_real(vcpu, start)) in handle_pfmf()
958 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in handle_pfmf()
961 switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { in handle_pfmf()
964 start = kvm_s390_real_to_abs(vcpu, start); in handle_pfmf()
973 if (!test_kvm_facility(vcpu->kvm, 78) || in handle_pfmf()
974 psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT) in handle_pfmf()
975 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_pfmf()
979 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_pfmf()
987 vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start)); in handle_pfmf()
989 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_pfmf()
991 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { in handle_pfmf()
992 if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE)) in handle_pfmf()
993 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_pfmf()
996 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) { in handle_pfmf()
997 int rc = kvm_s390_skey_check_enable(vcpu); in handle_pfmf()
1011 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_pfmf()
1019 if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { in handle_pfmf()
1020 if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) { in handle_pfmf()
1021 vcpu->run->s.regs.gprs[reg2] = end; in handle_pfmf()
1023 vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL; in handle_pfmf()
1024 end = kvm_s390_logical_to_effective(vcpu, end); in handle_pfmf()
1025 vcpu->run->s.regs.gprs[reg2] |= end; in handle_pfmf()
1034 static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc) in __do_essa() argument
1045 kvm_s390_get_regs_rre(vcpu, &r1, &r2); in __do_essa()
1046 gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT; in __do_essa()
1047 hva = gfn_to_hva(vcpu->kvm, gfn); in __do_essa()
1048 entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3; in __do_essa()
1051 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in __do_essa()
1053 nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev); in __do_essa()
1056 vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */ in __do_essa()
1073 vcpu->run->s.regs.gprs[r1] = res; in __do_essa()
1081 cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK); in __do_essa()
1086 struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn); in __do_essa()
1090 atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages); in __do_essa()
1096 static int handle_essa(struct kvm_vcpu *vcpu) in handle_essa() argument
1099 int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3; in handle_essa()
1104 VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries); in handle_essa()
1105 gmap = vcpu->arch.gmap; in handle_essa()
1106 vcpu->stat.instruction_essa++; in handle_essa()
1107 if (!vcpu->kvm->arch.use_cmma) in handle_essa()
1108 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); in handle_essa()
1110 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_essa()
1111 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_essa()
1113 orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; in handle_essa()
1115 if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT in handle_essa()
1117 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_essa()
1119 if (!vcpu->kvm->arch.migration_mode) { in handle_essa()
1129 if (vcpu->kvm->mm->context.uses_cmm == 0) { in handle_essa()
1130 down_write(&vcpu->kvm->mm->mmap_sem); in handle_essa()
1131 vcpu->kvm->mm->context.uses_cmm = 1; in handle_essa()
1132 up_write(&vcpu->kvm->mm->mmap_sem); in handle_essa()
1143 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; in handle_essa()
1145 kvm_s390_retry_instr(vcpu); in handle_essa()
1149 down_read(&vcpu->kvm->mm->mmap_sem); in handle_essa()
1150 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in handle_essa()
1151 i = __do_essa(vcpu, orc); in handle_essa()
1152 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in handle_essa()
1153 up_read(&vcpu->kvm->mm->mmap_sem); in handle_essa()
1159 vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */ in handle_essa()
1160 cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo); in handle_essa()
1168 int kvm_s390_handle_b9(struct kvm_vcpu *vcpu) in kvm_s390_handle_b9() argument
1170 switch (vcpu->arch.sie_block->ipa & 0x00ff) { in kvm_s390_handle_b9()
1174 return handle_ipte_interlock(vcpu); in kvm_s390_handle_b9()
1176 return handle_epsw(vcpu); in kvm_s390_handle_b9()
1178 return handle_essa(vcpu); in kvm_s390_handle_b9()
1180 return handle_pfmf(vcpu); in kvm_s390_handle_b9()
1186 int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) in kvm_s390_handle_lctl() argument
1188 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; in kvm_s390_handle_lctl()
1189 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; in kvm_s390_handle_lctl()
1195 vcpu->stat.instruction_lctl++; in kvm_s390_handle_lctl()
1197 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in kvm_s390_handle_lctl()
1198 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in kvm_s390_handle_lctl()
1200 ga = kvm_s390_get_base_disp_rs(vcpu, &ar); in kvm_s390_handle_lctl()
1203 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in kvm_s390_handle_lctl()
1205 VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); in kvm_s390_handle_lctl()
1206 trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); in kvm_s390_handle_lctl()
1209 rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); in kvm_s390_handle_lctl()
1211 return kvm_s390_inject_prog_cond(vcpu, rc); in kvm_s390_handle_lctl()
1215 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; in kvm_s390_handle_lctl()
1216 vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++]; in kvm_s390_handle_lctl()
1221 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvm_s390_handle_lctl()
1225 int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) in kvm_s390_handle_stctl() argument
1227 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; in kvm_s390_handle_stctl()
1228 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; in kvm_s390_handle_stctl()
1234 vcpu->stat.instruction_stctl++; in kvm_s390_handle_stctl()
1236 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in kvm_s390_handle_stctl()
1237 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in kvm_s390_handle_stctl()
1239 ga = kvm_s390_get_base_disp_rs(vcpu, &ar); in kvm_s390_handle_stctl()
1242 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in kvm_s390_handle_stctl()
1244 VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); in kvm_s390_handle_stctl()
1245 trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga); in kvm_s390_handle_stctl()
1250 ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; in kvm_s390_handle_stctl()
1255 rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); in kvm_s390_handle_stctl()
1256 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; in kvm_s390_handle_stctl()
1259 static int handle_lctlg(struct kvm_vcpu *vcpu) in handle_lctlg() argument
1261 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; in handle_lctlg()
1262 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; in handle_lctlg()
1268 vcpu->stat.instruction_lctlg++; in handle_lctlg()
1270 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_lctlg()
1271 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_lctlg()
1273 ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); in handle_lctlg()
1276 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_lctlg()
1278 VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); in handle_lctlg()
1279 trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); in handle_lctlg()
1282 rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); in handle_lctlg()
1284 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_lctlg()
1288 vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++]; in handle_lctlg()
1293 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in handle_lctlg()
1297 static int handle_stctg(struct kvm_vcpu *vcpu) in handle_stctg() argument
1299 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; in handle_stctg()
1300 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; in handle_stctg()
1306 vcpu->stat.instruction_stctg++; in handle_stctg()
1308 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_stctg()
1309 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_stctg()
1311 ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); in handle_stctg()
1314 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_stctg()
1316 VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga); in handle_stctg()
1317 trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga); in handle_stctg()
1322 ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; in handle_stctg()
1327 rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); in handle_stctg()
1328 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; in handle_stctg()
1331 int kvm_s390_handle_eb(struct kvm_vcpu *vcpu) in kvm_s390_handle_eb() argument
1333 switch (vcpu->arch.sie_block->ipb & 0x000000ff) { in kvm_s390_handle_eb()
1335 return handle_stctg(vcpu); in kvm_s390_handle_eb()
1337 return handle_lctlg(vcpu); in kvm_s390_handle_eb()
1341 return handle_ri(vcpu); in kvm_s390_handle_eb()
1347 static int handle_tprot(struct kvm_vcpu *vcpu) in handle_tprot() argument
1355 vcpu->stat.instruction_tprot++; in handle_tprot()
1357 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_tprot()
1358 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_tprot()
1360 kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL); in handle_tprot()
1367 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) in handle_tprot()
1368 ipte_lock(vcpu); in handle_tprot()
1369 ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE); in handle_tprot()
1373 ret = guest_translate_address(vcpu, address1, ar, &gpa, in handle_tprot()
1378 ret = kvm_s390_inject_program_int(vcpu, ret); in handle_tprot()
1381 kvm_s390_set_psw_cc(vcpu, 3); in handle_tprot()
1387 hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable); in handle_tprot()
1389 ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); in handle_tprot()
1393 kvm_s390_set_psw_cc(vcpu, cc); in handle_tprot()
1397 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) in handle_tprot()
1398 ipte_unlock(vcpu); in handle_tprot()
1402 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) in kvm_s390_handle_e5() argument
1404 switch (vcpu->arch.sie_block->ipa & 0x00ff) { in kvm_s390_handle_e5()
1406 return handle_tprot(vcpu); in kvm_s390_handle_e5()
1412 static int handle_sckpf(struct kvm_vcpu *vcpu) in handle_sckpf() argument
1416 vcpu->stat.instruction_sckpf++; in handle_sckpf()
1418 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_sckpf()
1419 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_sckpf()
1421 if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000) in handle_sckpf()
1422 return kvm_s390_inject_program_int(vcpu, in handle_sckpf()
1425 value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff; in handle_sckpf()
1426 vcpu->arch.sie_block->todpr = value; in handle_sckpf()
1431 static int handle_ptff(struct kvm_vcpu *vcpu) in handle_ptff() argument
1433 vcpu->stat.instruction_ptff++; in handle_ptff()
1436 kvm_s390_set_psw_cc(vcpu, 3); in handle_ptff()
1440 int kvm_s390_handle_01(struct kvm_vcpu *vcpu) in kvm_s390_handle_01() argument
1442 switch (vcpu->arch.sie_block->ipa & 0x00ff) { in kvm_s390_handle_01()
1444 return handle_ptff(vcpu); in kvm_s390_handle_01()
1446 return handle_sckpf(vcpu); in kvm_s390_handle_01()
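
Taken together, the matched lines above follow one recurring shape in priv.c: bump the per-vcpu stat counter, reject problem-state guests with PGM_PRIVILEGED_OP, decode the base/displacement operand, access guest memory, then either set a condition code or inject a program interruption. The fragment below is an illustrative sketch of that shape only, assembled from calls that appear in the matches; it is not part of priv.c, and the stat field instruction_example is hypothetical.

/* illustrative sketch only -- mirrors the pattern of e.g. handle_store_prefix() above */
static int handle_example(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 value = 0;
	u8 ar;
	int rc;

	vcpu->stat.instruction_example++;	/* hypothetical counter */

	/* privileged-operation check, as in the other handlers */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* decode the storage operand (base register + displacement) */
	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (operand2 & 3)	/* enforce word alignment */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* guest access; a fault is surfaced as an injected program check */
	rc = write_guest(vcpu, operand2, ar, &value, sizeof(value));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}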