// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>

#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT(hvc_exit_stat),
	VCPU_STAT(wfe_exit_stat),
	VCPU_STAT(wfi_exit_stat),
	VCPU_STAT(mmio_exit_user),
	VCPU_STAT(mmio_exit_kernel),
	VCPU_STAT(exits),
	{ NULL }
};

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

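/*
 * Core register IDs encode an index into struct kvm_regs in their low
 * bits; masking off the architecture, size and coproc-group fields
 * leaves just that index.
 */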
static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

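/*
 * Copy one 32-bit core register out to userspace, treating struct
 * kvm_regs as an array of u32 words and bounds-checking the index.
 */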
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	u32 __user *uaddr = (u32 __user *)(long)reg->addr;
	struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
	u64 off;

	if (KVM_REG_SIZE(reg->id) != 4)
		return -ENOENT;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
		return -ENOENT;

	return put_user(((u32 *)regs)[off], uaddr);
}

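/*
 * Write one 32-bit core register from userspace. A CPSR write is only
 * accepted if its mode field names a mode the guest may legally run in;
 * anything else (e.g. HYP mode or an undefined encoding) is rejected.
 */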
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	u32 __user *uaddr = (u32 __user *)(long)reg->addr;
	struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
	u64 off, val;

	if (KVM_REG_SIZE(reg->id) != 4)
		return -ENOENT;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
		return -ENOENT;

	if (get_user(val, uaddr) != 0)
		return -EFAULT;

	if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) {
		unsigned long mode = val & MODE_MASK;
		switch (mode) {
		case USR_MODE:
		case FIQ_MODE:
		case IRQ_MODE:
		case SVC_MODE:
		case ABT_MODE:
		case UND_MODE:
			break;
		default:
			return -EINVAL;
		}
	}

	((u32 *)regs)[off] = val;
	return 0;
}

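/*
 * The legacy KVM_GET_REGS/KVM_SET_REGS ioctls are not supported on ARM;
 * userspace accesses registers one at a time through KVM_GET_ONE_REG
 * and KVM_SET_ONE_REG instead.
 */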
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

#define NUM_TIMER_REGS 3

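/*
 * The architected timer is exposed as three registers: control, counter
 * and compare value (CTL, CNT and CVAL); NUM_TIMER_REGS above must match.
 */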
static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

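/*
 * Timer registers are transferred with copy_{from,to}_user() rather than
 * {get,put}_user() because their size comes from the register ID and may
 * be 64 bits (e.g. CVAL) even on a 32-bit host.
 */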
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

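/* Core registers are enumerated as 32-bit words of struct kvm_regs. */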
static unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_regs) / sizeof(u32);
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 * @vcpu: the vCPU being queried
 *
 * This is for all registers: core, coproc, firmware and timer.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
		+ kvm_arm_get_fw_num_regs(vcpu)
		+ NUM_TIMER_REGS;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 * @vcpu: the vCPU whose registers are enumerated
 * @uindices: userspace buffer to receive the register IDs
 *
 * We do core registers right here, then firmware and timer registers,
 * and finally append the coproc regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;
	int ret;

	for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
		if (put_user(core_reg | i, uindices))
			return -EFAULT;
		uindices++;
	}

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_coproc_indices(vcpu, uindices);
}

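/*
 * Dispatch a KVM_GET_ONE_REG access on the coproc-group field of the
 * register ID: the core and firmware groups are decoded here, timer IDs
 * are matched explicitly, and everything else falls through to the
 * coproc tables. A userspace caller might read r0 like this (sketch
 * only; 'vcpu_fd' and 'val' are illustrative names):
 *
 *	__u32 val;
 *	struct kvm_one_reg r = {
 *		.id   = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE |
 *			KVM_REG_ARM_CORE_REG(usr_regs.ARM_r0),
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &r);
 */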
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
		return -EINVAL;

	/* Register group 16 means we want a core register. */
	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
		return get_core_reg(vcpu, reg);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
		return kvm_arm_get_fw_reg(vcpu, reg);

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_coproc_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
		return -EINVAL;

	/* Register group 16 means we set a core register. */
	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
		return set_core_reg(vcpu, reg);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
		return kvm_arm_set_fw_reg(vcpu, reg);

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_coproc_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

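/*
 * A pending virtual SError is tracked through the HCR.VA bit, so
 * get_events simply reports that bit. On 32-bit ARM there is no way to
 * specify the SError syndrome, hence set_events rejects a pending
 * SError that carries one (serror_has_esr).
 */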
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(*vcpu_hcr(vcpu) & HCR_VA);

	return 0;
}

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;

	if (serror_pending && has_esr)
		return -EINVAL;
	else if (serror_pending)
		kvm_inject_vabt(vcpu);

	return 0;
}

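/*
 * Only Cortex-A7 and Cortex-A15 hosts are recognised as KVM targets;
 * any other part number makes the host unsuitable.
 */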
int __attribute_const__ kvm_target_cpu(void)
{
	switch (read_cpuid_part()) {
	case ARM_CPU_PART_CORTEX_A7:
		return KVM_ARM_TARGET_CORTEX_A7;
	case ARM_CPU_PART_CORTEX_A15:
		return KVM_ARM_TARGET_CORTEX_A15;
	default:
		return -EINVAL;
	}
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}

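/*
 * FPU state, address translation and guest debug are not handled through
 * these generic ioctls on ARM, so the stubs below all return -EINVAL.
 */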
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

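/*
 * Device attribute handlers: only the KVM_ARM_VCPU_TIMER_CTRL group is
 * implemented here; any other group is answered with -ENXIO.
 */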
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}