/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

#define PRIx64 "llx"

atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);

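/* Host ASID currently assigned to the guest kernel address space on this CPU */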
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}

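/* Host ASID currently assigned to the guest user address space on this CPU */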
uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}

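/*
 * Despite the name, this returns the index of the wired host TLB entry
 * reserved for the guest commpage, not an ASID.
 */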
inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}

/* Dump the contents of the host TLB to the kernel log */
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	kvm_info("HOST TLBs:\n");
	kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);

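/* Dump the guest's software-managed TLB, as held in vcpu->arch.guest_tlb */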
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);

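/*
 * Fault a guest physical page into the host and cache its pfn in the
 * guest physical address map. Returns 0 on success, -EFAULT if no host
 * page could be found for the gfn.
 */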
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (is_error_noslot_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);

/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Valid indices are 0 .. tlbsize - 1; negative means no match */
	if (idx >= current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	/* Set the cache attribute field (0x3 = cacheable), D and V bits */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       flush_dcache_mask);
}
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);

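/*
 * Write the wired host TLB entry reserved for the guest commpage, mapping
 * the faulting guest address to the vcpu's kseg0 commpage.
 */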
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
	struct kvm_vcpu *vcpu)
{
	pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	/* kvm_mips_get_commpage_asid() returns the reserved host TLB index */
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);

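/*
 * Mirror a guest TLB entry into the host TLB, translating the guest
 * physical addresses it maps into host physical addresses. The resulting
 * host physical addresses are optionally returned via @hpa0 and @hpa1.
 */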
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	pfn_t pfn0, pfn1;
	gfn_t gfn0, gfn1;
	long tlb_lo[2];

	tlb_lo[0] = tlb->tlb_lo0;
	tlb_lo[1] = tlb->tlb_lo1;

	/*
	 * The commpage address must not be mapped to anything else if the
	 * guest TLB contains entries nearby, or commpage accesses will break.
	 */
	if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
	      VPN2_MASK & (PAGE_MASK << 1)))
		tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;

	gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
	gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
	if (gfn0 >= kvm->arch.guest_pmap_npages ||
	    gfn1 >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
			__func__, gfn0, gfn1, tlb->tlb_hi);
		kvm_mips_dump_guest_tlbs(vcpu);
		return -1;
	}

	if (kvm_mips_map_page(kvm, gfn0) < 0)
		return -1;

	if (kvm_mips_map_page(kvm, gfn1) < 0)
		return -1;

	pfn0 = kvm->arch.guest_pmap[gfn0];
	pfn1 = kvm->arch.guest_pmap[gfn1];

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       tlb->tlb_mask);
}
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);

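/*
 * Look up @entryhi (VPN2 | ASID) in the guest TLB. Returns the index of
 * the matching entry, or -1 if none matches.
 */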
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/* Only dereference the entry when the lookup actually hit */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo0, tlb[index].tlb_lo1);

	return index;
}
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);

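/*
 * Probe the host TLB for a translation of guest virtual address @vaddr
 * under the vcpu's current ASID. Returns the matching index, or a
 * negative value on miss.
 */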
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);

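/*
 * Invalidate the host TLB entry (if any) that maps @va under the vcpu's
 * guest user ASID.
 */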
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	BUG_ON(idx >= current_cpu_data.tlbsize);

	/* A hit at index 0 is a valid match and must be invalidated too */
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);

/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
	unsigned long flags, old_entryhi;

	BUG_ON(index >= current_cpu_data.tlbsize);

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi(UNIQUE_ENTRYHI(index));
	mtc0_tlbw_hazard();

	write_c0_index(index);
	mtc0_tlbw_hazard();

	write_c0_entrylo0(0);
	mtc0_tlbw_hazard();

	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();

	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	return 0;
}

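/*
 * Invalidate all host TLB entries; if @skip_kseg0 is set, entries mapping
 * guest KSEG0 are preserved.
 */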
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);

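/*
 * Allocate a new ASID for @mm on @cpu, flushing the TLB and starting a
 * new ASID cycle when the ASID space wraps.
 */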
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += ASID_INC;
	if (!(asid & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();      /* start new asid cycle */

		if (!asid)      /* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

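/* Invalidate every entry in this CPU's TLB */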
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_local_flush_tlb_all);

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
	    ASID_VERSION_MASK) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we were preempted while the guest was executing, reload
		 * the preempted ASID.
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 ASID_MASK);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so then the preempted ASID is
		 * no longer valid, we need to set it to what it should be
		 * based on the mode of the Guest (Kernel/User).
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
						 ASID_MASK);
			else
				write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
						 ASID_MASK);
			ehb();
		}
	}

	/* Restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_arch_vcpu_load);

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* Save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_arch_vcpu_put);

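/*
 * Fetch the guest instruction at guest address @opc, faulting the mapping
 * into the host TLB via the guest TLB if required. Returns
 * KVM_INVALID_INST if the address cannot be translated.
 */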
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *opc;
		} else {
			vpn2 = (unsigned long) opc & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
						&vcpu->arch.guest_tlb[index],
						NULL, NULL)) {
				kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %d, vcpu: %p, ASID: %#lx\n",
					__func__, opc, index, vcpu,
					read_c0_entryhi());
				kvm_mips_dump_guest_tlbs(vcpu);
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			inst = *opc;
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							(unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
EXPORT_SYMBOL(kvm_get_inst);