/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/bootmem.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#include "commpage.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif

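/*
 * Each entry below ties a named stat to the offset of its counter within
 * struct kvm_vcpu, so the generic KVM code can expose it through debugfs.
 */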
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait",	  VCPU_STAT(wait_exits),	 KVM_STAT_VCPU },
	{ "cache",	  VCPU_STAT(cache_exits),	 KVM_STAT_VCPU },
	{ "signal",	  VCPU_STAT(signal_exits),	 KVM_STAT_VCPU },
	{ "interrupt",	  VCPU_STAT(int_exits),		 KVM_STAT_VCPU },
	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
	{ "tlbmod",	  VCPU_STAT(tlbmod_exits),	 KVM_STAT_VCPU },
	{ "tlbmiss_ld",	  VCPU_STAT(tlbmiss_ld_exits),	 KVM_STAT_VCPU },
	{ "tlbmiss_st",	  VCPU_STAT(tlbmiss_st_exits),	 KVM_STAT_VCPU },
	{ "addrerr_st",	  VCPU_STAT(addrerr_st_exits),	 KVM_STAT_VCPU },
	{ "addrerr_ld",	  VCPU_STAT(addrerr_ld_exits),	 KVM_STAT_VCPU },
	{ "syscall",	  VCPU_STAT(syscall_exits),	 KVM_STAT_VCPU },
	{ "resvd_inst",	  VCPU_STAT(resvd_inst_exits),	 KVM_STAT_VCPU },
	{ "break_inst",	  VCPU_STAT(break_inst_exits),	 KVM_STAT_VCPU },
	{ "trap_inst",	  VCPU_STAT(trap_inst_exits),	 KVM_STAT_VCPU },
	{ "msa_fpe",	  VCPU_STAT(msa_fpe_exits),	 KVM_STAT_VCPU },
	{ "fpe",	  VCPU_STAT(fpe_exits),		 KVM_STAT_VCPU },
	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
#ifdef CONFIG_KVM_MIPS_VZ
	{ "vz_gpsi",	  VCPU_STAT(vz_gpsi_exits),	 KVM_STAT_VCPU },
	{ "vz_gsfc",	  VCPU_STAT(vz_gsfc_exits),	 KVM_STAT_VCPU },
	{ "vz_hc",	  VCPU_STAT(vz_hc_exits),	 KVM_STAT_VCPU },
	{ "vz_grr",	  VCPU_STAT(vz_grr_exits),	 KVM_STAT_VCPU },
	{ "vz_gva",	  VCPU_STAT(vz_gva_exits),	 KVM_STAT_VCPU },
	{ "vz_ghfc",	  VCPU_STAT(vz_ghfc_exits),	 KVM_STAT_VCPU },
	{ "vz_gpa",	  VCPU_STAT(vz_gpa_exits),	 KVM_STAT_VCPU },
	{ "vz_resvd",	  VCPU_STAT(vz_resvd_exits),	 KVM_STAT_VCPU },
#endif
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
	{ "halt_wakeup",  VCPU_STAT(halt_wakeup),	 KVM_STAT_VCPU },
	{NULL}
};

bool kvm_trace_guest_mode_change;

int kvm_guest_mode_change_trace_reg(void)
{
	kvm_trace_guest_mode_change = 1;
	return 0;
}

void kvm_guest_mode_change_trace_unreg(void)
{
	kvm_trace_guest_mode_change = 0;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_hardware_enable(void)
{
	return kvm_mips_callbacks->hardware_enable();
}

void kvm_arch_hardware_disable(void)
{
	kvm_mips_callbacks->hardware_disable();
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	switch (type) {
#ifdef CONFIG_KVM_MIPS_VZ
	case KVM_VM_MIPS_VZ:
#else
	case KVM_VM_MIPS_TE:
#endif
		break;
	default:
		/* Unsupported KVM type */
		return -EINVAL;
	}

	/* Allocate page table to map GPA -> RPA */
	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
	if (!kvm->arch.gpa_mm.pgd)
		return -ENOMEM;

	return 0;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}

static void kvm_mips_free_gpa_pt(struct kvm *kvm)
{
	/* It should always be safe to remove after flushing the whole range */
	WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
	pgd_free(NULL, kvm->arch.gpa_mm.pgd);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);
	kvm_mips_free_gpa_pt(kvm);
}

long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	/* Flush whole GPA */
	kvm_mips_flush_gpa_pt(kvm, 0, ~0);

	/* Let implementation do the rest */
	kvm_mips_callbacks->flush_shadow_all(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	/*
	 * The slot has been made invalid (ready for moving or deletion), so we
	 * need to ensure that it can no longer be accessed by any guest VCPUs.
	 */

	spin_lock(&kvm->mmu_lock);
	/* Flush slot from GPA */
	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
			      slot->base_gfn + slot->npages - 1);
	/* Let implementation do the rest */
	kvm_mips_callbacks->flush_shadow_memslot(kvm, slot);
	spin_unlock(&kvm->mmu_lock);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int needs_flush;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/*
	 * If dirty page logging is enabled, write protect all pages in the slot
	 * ready for dirty logging.
	 *
	 * There is no need to do this in any of the following cases:
	 * CREATE:	No dirty mappings will already exist.
	 * MOVE/DELETE:	The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot()
	 */
	if (change == KVM_MR_FLAGS_ONLY &&
	    (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
		spin_lock(&kvm->mmu_lock);
		/* Write protect GPA page table entries */
		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
					new->base_gfn + new->npages - 1);
		/* Let implementation do the rest */
		if (needs_flush)
			kvm_mips_callbacks->flush_shadow_memslot(kvm, new);
		spin_unlock(&kvm->mmu_lock);
	}
}

static inline void dump_handler(const char *symbol, void *start, void *end)
{
	u32 *p;

	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (p = start; p < (u32 *)end; ++p)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err, size;
	void *gebase, *p, *handler, *refill_start, *refill_end;
	int i;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);

	if (err)
		goto out_free_cpu;

	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_uninit_cpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/*
	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
	 * limits us to the low 512MB of physical address space. If the memory
	 * we allocate is out of range, just give up now.
	 */
	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
			gebase);
		err = -ENOMEM;
		goto out_free_gebase;
	}

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

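	/*
	 * Layout of the code generated below, mirroring the architected
	 * vector offsets relative to EBase: TLB refill at 0x000, general
	 * exception at 0x180, vectored interrupts from 0x200, and the common
	 * exit handler plus guest entry code from 0x2000.
	 */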
	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
	refill_start = gebase;
	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
		refill_start += 0x080;
	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);

	/* General Exception Entry point */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
					 handler);
	}

	/* General exit handler */
	p = handler;
	p = kvm_mips_build_exit(p);

	/* Guest entry routine */
	vcpu->arch.vcpu_run = p;
	p = kvm_mips_build_vcpu_run(p);

	/* Dump the generated code */
	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");
	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
	dump_handler("kvm_tlb_refill", refill_start, refill_end);
	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);

	/* Invalidate the icache for these ranges */
	flush_icache_range((unsigned long)gebase,
			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/*
	 * Allocate comm page for guest kernel, a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		err = -ENOMEM;
		goto out_free_gebase;
	}

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;
	vcpu->arch.last_exec_cpu = -1;

	return vcpu;

out_free_gebase:
	kfree(gebase);

out_uninit_cpu:
	kvm_vcpu_uninit(vcpu);

out_free_cpu:
	kfree(vcpu);

out:
	return ERR_PTR(err);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	kvm_mmu_free_memory_caches(vcpu);
	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);
	kfree(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r = -EINTR;

	kvm_sigset_activate(vcpu);

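	/*
	 * If the last exit was for MMIO, complete it now: a load's result
	 * needs to be committed to guest state before re-entering the guest.
	 */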
	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	if (run->immediate_exit)
		goto out;

	lose_fpu(1);

	local_irq_disable();
	guest_enter_irqoff();
	trace_kvm_enter(vcpu);

	/*
	 * Make sure the read of VCPU requests in vcpu_run() callback is not
	 * reordered ahead of the write to vcpu->mode, or we could miss a TLB
	 * flush request while the requester sees the VCPU as outside of guest
	 * mode and not needing an IPI.
	 */
	smp_store_mb(vcpu->mode, IN_GUEST_MODE);

	r = kvm_mips_callbacks->vcpu_run(run, vcpu);

	trace_kvm_out(vcpu);
	guest_exit_irqoff();
	local_irq_enable();

out:
	kvm_sigset_deactivate(vcpu);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	if (swq_has_sleeper(&dvcpu->wq))
		swake_up(&dvcpu->wq);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

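/* Base set of register IDs always exposed through KVM_GET_REG_LIST. */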
static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

#ifndef CONFIG_CPU_MIPSR6
	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
#endif
	KVM_REG_MIPS_PC,
};

static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_IR,
	KVM_REG_MIPS_FCR_CSR,
};

static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_IR,
	KVM_REG_MIPS_MSA_CSR,
};

static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
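		/* 48 = 32 single-precision FPRs + 16 even-indexed doubles */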
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
		/* odd doubles */
		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
			ret += 16;
	}
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += kvm_mips_callbacks->num_regs(vcpu);

	return ret;
}

static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;

			/* skip odd doubles if no F64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				continue;

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
				 sizeof(kvm_mips_get_one_regs_msa)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_VEC_128(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
}

static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	int ret;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
#endif
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
		else
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		v = get_fpr64(&fpu->fpr[idx], 0);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.fpu_id;
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = fpu->fcr31;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.msa_id;
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = fpu->msacsr;
		break;

	/* registers to be handled specially */
	default:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	}
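	/* Copy the value out at the access width encoded in the register ID */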
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
}

static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	s64 v;
	s64 vs[2];
	unsigned int idx;

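	/* Read the new value in at the access width encoded in the register ID */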
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		/* Fall through to the switch below so the value is applied */
		if (copy_from_user(vs, uaddr, 16))
			return -EFAULT;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
#endif
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
		else
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		set_fpr64(&fpu->fpr[idx], 0, v);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		fpu->fcr31 = v;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		fpu->msacsr = v;
		break;

	/* registers to be handled specially */
	default:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	}
	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r = 0;

	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
		return -EINVAL;
	if (cap->flags)
		return -EINVAL;
	if (cap->args[0])
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
		break;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);
		else
			return kvm_mips_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		return kvm_mips_copy_reg_indices(vcpu, user_list->reg);
	}
	case KVM_INTERRUPT:
		{
			struct kvm_mips_interrupt irq;

			if (copy_from_user(&irq, argp, sizeof(irq)))
				return -EFAULT;
			kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
				  irq.irq);

			r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
			break;
		}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		if (copy_from_user(&cap, argp, sizeof(cap)))
			return -EFAULT;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}
	return r;
}

/**
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @kvm: kvm instance
 * @log: slot id and address to which we copy the log
 *
 * Steps 1-4 below provide a general overview of dirty page logging. See
 * kvm_get_dirty_log_protect() function description for additional details.
 *
 * We call kvm_get_dirty_log_protect() to handle steps 1-3, and upon return we
 * always flush the TLB (step 4) even if a previous step failed and the dirty
 * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
 * API does not preclude user space from making subsequent dirty log reads.
 * Flushing the TLB ensures writes will be marked dirty for the next log read.
 *
 *   1. Take a snapshot of the bit and clear it if needed.
 *   2. Write protect the corresponding page.
 *   3. Copy the snapshot to the userspace.
 *   4. Flush TLB's if needed.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	bool is_dirty = false;
	int r;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);

	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		/* Let implementation handle TLB/GVA invalidation */
		kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
	}

	mutex_unlock(&kvm->slots_lock);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_ID;
		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = kvm_mips_callbacks->check_extension(kvm, ext);
		break;
	}
	return r;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu) ||
		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       vcpu->arch.gprs[i],
		       vcpu->arch.gprs[i + 1],
		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	return 0;
}

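/*
 * Timer expiry: queue the guest timer interrupt and wake the VCPU if it is
 * sleeping in a guest WAIT instruction.
 */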
static void kvm_mips_comparecount_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (swq_has_sleeper(&vcpu->wq))
		swake_up(&vcpu->wq);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int err;

	err = kvm_mips_callbacks->vcpu_init(vcpu);
	if (err)
		return err;

	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_mips_callbacks->vcpu_uninit(vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return kvm_mips_callbacks->vcpu_setup(vcpu);
}

static void kvm_mips_set_c0_status(void)
{
	u32 status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}

/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	u32 inst;
	int ret = RESUME_GUEST;

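	/*
	 * Publish that this VCPU has left guest mode; remote request senders
	 * (e.g. TLB flushes) check vcpu->mode to decide whether an IPI is
	 * needed to interrupt it.
	 */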
	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* re-enable HTW before enabling interrupts */
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
		htw_start();

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
			cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
		/*
		 * Do a privilege check, if in UM most of these exit conditions
		 * end up causing an exception to be delivered to the Guest
		 * Kernel
		 */
		er = kvm_mips_check_privilege(cause, opc, run, vcpu);
		if (er == EMULATE_PRIV_FAIL) {
			goto skip_emul;
		} else if (er == EMULATE_FAIL) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
			goto skip_emul;
		}
	}

	switch (exccode) {
	case EXCCODE_INT:
		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case EXCCODE_CPU:
		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case EXCCODE_MOD:
		++vcpu->stat.tlbmod_exits;
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case EXCCODE_TLBS:
		kvm_debug("TLB ST fault:  cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case EXCCODE_TLBL:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case EXCCODE_ADES:
		++vcpu->stat.addrerr_st_exits;
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case EXCCODE_ADEL:
		++vcpu->stat.addrerr_ld_exits;
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case EXCCODE_SYS:
		++vcpu->stat.syscall_exits;
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case EXCCODE_RI:
		++vcpu->stat.resvd_inst_exits;
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case EXCCODE_BP:
		++vcpu->stat.break_inst_exits;
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case EXCCODE_TR:
		++vcpu->stat.trap_inst_exits;
		ret = kvm_mips_callbacks->handle_trap(vcpu);
		break;

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
		break;

	case EXCCODE_FPE:
		++vcpu->stat.fpe_exits;
		ret = kvm_mips_callbacks->handle_fpe(vcpu);
		break;

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	case EXCCODE_GE:
		/* defer exit accounting to handler */
		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
		break;

	default:
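		/*
		 * If the exception hit a branch delay slot, the faulting
		 * instruction is the one after the branch at EPC.
		 */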
		if (cause & CAUSEF_BD)
			opc += 1;
		inst = 0;
		kvm_get_badinstr(opc, vcpu, &inst);
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#x\n",
			exccode, opc, inst, badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	}

skip_emul:
	local_irq_disable();

	if (ret == RESUME_GUEST)
		kvm_vz_acquire_htimer(vcpu);

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
		}
	}

	if (ret == RESUME_GUEST) {
		trace_kvm_reenter(vcpu);

		/*
		 * Make sure the read of VCPU requests in vcpu_reenter()
		 * callback is not reordered ahead of the write to vcpu->mode,
		 * or we could miss a TLB flush request while the requester sees
		 * the VCPU as outside of guest mode and not needing an IPI.
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		kvm_mips_callbacks->vcpu_reenter(run, vcpu);

		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}

	/* Disable HTW before returning to guest or host */
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
		htw_stop();

	return ret;
}

/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 *
	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
	 * get called when guest CU1 is set, however we can't trust the guest
	 * not to clobber the status register directly via the commpage.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
		kvm_lose_fpu(vcpu);

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	if (cpu_has_fre) {
		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);
	}
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
	} else {
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
	}

	preempt_enable();
}

#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
				KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
			kvm_lose_fpu(vcpu);

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);
		}
	}

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
		break;
	case 0:
		/* Neither FPU nor MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
			      KVM_TRACE_AUX_FPU_MSA);
		break;
	default:
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
		break;
	}

	preempt_enable();
}
#endif

/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		disable_msa();
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
	}
	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
	}
	preempt_enable();
}

/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * With T&E, FPU & MSA get disabled in root context (hardware) when it
	 * is disabled in guest context (software), but the register state in
	 * the hardware may still be in use.
	 * This is why we explicitly re-enable the hardware before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_config5(MIPS_CONF5_MSAEN);
			enable_fpu_hazard();
		}

		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		}
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_status(ST0_CU1);
			enable_fpu_hazard();
		}

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
	}
	preempt_enable();
}

/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};

static int __init kvm_mips_init(void)
{
	int ret;

	ret = kvm_mips_entry_setup();
	if (ret)
		return ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	return 0;
}

static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);