/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>

#include "commpage.h"

/**
 * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
 * @vcpu:	Virtual CPU.
 * @opc:	PC of instruction to replace.
 * @replace:	Instruction to write.
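 *
 * Return: 0 on success, or -EFAULT if the instruction could not be written.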
 */
static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
				  union mips_instruction replace)
{
	unsigned long vaddr = (unsigned long)opc;
	int err;

retry:
	/* The GVA page table is still active so use the Linux TLB handlers */
	kvm_trap_emul_gva_lockless_begin(vcpu);
	err = put_user(replace.word, opc);
	kvm_trap_emul_gva_lockless_end(vcpu);

	if (unlikely(err)) {
		/*
		 * We write protect clean pages in the GVA page table so the
		 * normal Linux TLB mod handler doesn't silently dirty the
		 * page. It's also possible we raced with a GVA invalidation.
		 * Try to force the page to become dirty.
		 */
		err = kvm_trap_emul_gva_fault(vcpu, vaddr, true);
		if (unlikely(err)) {
			kvm_info("%s: Address unwriteable: %p\n",
				 __func__, opc);
			return -EFAULT;
		}

		/*
		 * Try again. This will likely trigger a TLB refill, which will
		 * fetch the new dirty entry from the GVA page table, which
		 * should then succeed.
		 */
		goto retry;
	}
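	/* Make sure instruction fetch sees the newly written instruction */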
	__local_flush_icache_user_range(vaddr, vaddr + 4);

	return 0;
}

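/**
 * kvm_mips_trans_cache_index() - Replace index-based CACHE instruction.
 * @inst:	Trapping CACHE instruction.
 * @opc:	PC of instruction to replace.
 * @vcpu:	Virtual CPU.
 *
 * Index-based CACHE operations are simply dropped, so the guest no longer
 * traps on them.
 */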
int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu)
{
	union mips_instruction nop_inst = { 0 };

	/* Replace the CACHE instruction with a NOP */
	return kvm_mips_trans_replace(vcpu, opc, nop_inst);
}

/*
 * Address-based CACHE instructions are transformed into synci(s). A little
 * heavy for just D-cache invalidates, but it avoids an expensive trap.
 */
int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
			    struct kvm_vcpu *vcpu)
{
	union mips_instruction synci_inst = { 0 };

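	/* SYNCI shares the bcond (REGIMM) major opcode; rt selects SYNCI */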
	synci_inst.i_format.opcode = bcond_op;
	synci_inst.i_format.rs = inst.i_format.rs;
	synci_inst.i_format.rt = synci_op;
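	/* MIPS r6 encodes CACHE in the SPEC3 format, so the offset field moved */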
	if (cpu_has_mips_r6)
		synci_inst.i_format.simmediate = inst.spec3_format.simmediate;
	else
		synci_inst.i_format.simmediate = inst.i_format.simmediate;

	return kvm_mips_trans_replace(vcpu, opc, synci_inst);
}

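/**
 * kvm_mips_trans_mfc0() - Replace trapping MFC0 instruction.
 * @inst:	Trapping MFC0 instruction.
 * @opc:	PC of instruction to replace.
 * @vcpu:	Virtual CPU.
 *
 * Reads of ErrCtl become a move of zero into the destination register; other
 * CP0 reads become a load from the register's slot in the guest commpage.
 */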
int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
			struct kvm_vcpu *vcpu)
{
	union mips_instruction mfc0_inst = { 0 };
	u32 rd, sel;

	rd = inst.c0r_format.rd;
	sel = inst.c0r_format.sel;

	if (rd == MIPS_CP0_ERRCTL && sel == 0) {
		mfc0_inst.r_format.opcode = spec_op;
		mfc0_inst.r_format.rd = inst.c0r_format.rt;
		mfc0_inst.r_format.func = add_op;
	} else {
		mfc0_inst.i_format.opcode = lw_op;
		mfc0_inst.i_format.rt = inst.c0r_format.rt;
		mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
			offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
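		/*
		 * On a big-endian host with 64-bit saved CP0 registers, the
		 * 32-bit load must target the low word, 4 bytes into the slot.
		 */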
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
			mfc0_inst.i_format.simmediate |= 4;
#endif
	}

	return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
}

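/**
 * kvm_mips_trans_mtc0() - Replace trapping MTC0 instruction.
 * @inst:	Trapping MTC0 instruction.
 * @opc:	PC of instruction to replace.
 * @vcpu:	Virtual CPU.
 *
 * CP0 writes become a store of the source register to the register's slot in
 * the guest commpage.
 */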
int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
			struct kvm_vcpu *vcpu)
{
	union mips_instruction mtc0_inst = { 0 };
	u32 rd, sel;

	rd = inst.c0r_format.rd;
	sel = inst.c0r_format.sel;

	mtc0_inst.i_format.opcode = sw_op;
	mtc0_inst.i_format.rt = inst.c0r_format.rt;
	mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
		offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
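	/* As for MFC0 above: on big-endian, store to the low word of the slot */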
#ifdef CONFIG_CPU_BIG_ENDIAN
	if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
		mtc0_inst.i_format.simmediate |= 4;
#endif

	return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
}