/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cputype.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>

/*
 * ARMv8 Reset Values
 */
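/*
 * 64-bit vcpus start at EL1h with the D, A, I and F exception masks set;
 * 32-bit vcpus start in AArch32 Supervisor mode with A, I and F masked.
 */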
static const struct kvm_regs default_regs_reset = {
	.regs.pstate = (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT |
			PSR_F_BIT | PSR_D_BIT),
};

static const struct kvm_regs default_regs_reset32 = {
	.regs.pstate = (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT |
			PSR_AA32_I_BIT | PSR_AA32_F_BIT),
};

static bool cpu_has_32bit_el1(void)
{
	u64 pfr0;

	pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
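	/* Bit 5 is ID_AA64PFR0_EL1.EL1 bit 1, set when EL1 supports AArch32 */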
	return !!(pfr0 & 0x20);
}

/**
 * kvm_arch_dev_ioctl_check_extension
 *
 * We currently assume that the number of HW registers is uniform
 * across all CPUs (see cpuinfo_sanity_check).
 */
int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ARM_EL1_32BIT:
		r = cpu_has_32bit_el1();
		break;
	case KVM_CAP_GUEST_DEBUG_HW_BPS:
		r = get_num_brps();
		break;
	case KVM_CAP_GUEST_DEBUG_HW_WPS:
		r = get_num_wrps();
		break;
	case KVM_CAP_ARM_PMU_V3:
		r = kvm_arm_support_pmu_v3();
		break;
	case KVM_CAP_ARM_INJECT_SERROR_ESR:
		r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
		break;
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_VCPU_ATTRIBUTES:
	case KVM_CAP_VCPU_EVENTS:
		r = 1;
		break;
	default:
		r = 0;
	}

	return r;
}

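/*
 * Illustrative userspace sketch (not part of the original file): these
 * capabilities are reported through the KVM_CHECK_EXTENSION ioctl, e.g.
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_EL1_32BIT);
 *
 * where ret > 0 means 32-bit EL1 guests can be created on this host.
 */
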
/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on
 * the virtual CPU struct to their architecturally defined reset
 * values.
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code.  In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded.  Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function.  Otherwise we leave the state alone.  In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	const struct kvm_regs *cpu_reset;
	int ret = -EINVAL;
	bool loaded;

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

	preempt_disable();
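	/* vcpu->cpu is -1 whenever the vCPU is not loaded on a physical CPU */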
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	switch (vcpu->arch.target) {
	default:
		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
			if (!cpu_has_32bit_el1())
				goto out;
			cpu_reset = &default_regs_reset32;
		} else {
			cpu_reset = &default_regs_reset;
		}

		break;
	}

	/* Reset core registers */
	memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset));

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (vcpu->arch.reset_state.reset) {
		unsigned long target_pc = vcpu->arch.reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (vcpu->arch.reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);

		vcpu->arch.reset_state.reset = false;
	}

	/* Default workaround setup is enabled (if supported) */
	if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;

	/* Reset timer */
	ret = kvm_timer_vcpu_reset(vcpu);
out:
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
	return ret;
}
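
/*
 * Illustrative userspace sketch (not part of the original file): the
 * KVM_ARM_VCPU_INIT path mentioned above is driven roughly as follows,
 * with the optional EL1_32BIT feature only accepted when
 * cpu_has_32bit_el1() is true:
 *
 *	struct kvm_vcpu_init init;
 *
 *	ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
 *	init.features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 */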