1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
5  */
6 
7 #include <linux/types.h>
8 #include <linux/string.h>
9 #include <linux/kvm.h>
10 #include <linux/kvm_host.h>
11 #include <linux/anon_inodes.h>
12 #include <linux/file.h>
13 #include <linux/debugfs.h>
14 
15 #include <asm/kvm_ppc.h>
16 #include <asm/kvm_book3s.h>
17 #include <asm/page.h>
18 #include <asm/mmu.h>
19 #include <asm/pgtable.h>
20 #include <asm/pgalloc.h>
21 #include <asm/pte-walk.h>
22 
23 /*
24  * Supported radix tree geometry.
25  * Like p9, we support either 5 or 9 bits at the first (lowest) level,
26  * for a page size of 64k or 4k.
27  */
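/* Indexed by tree level: [3] is the top (root) level, [0] the lowest (leaf) level. */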
28 static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
29 
30 unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
31 					      gva_t eaddr, void *to, void *from,
32 					      unsigned long n)
33 {
34 	int old_pid, old_lpid;
35 	unsigned long quadrant, ret = n;
36 	bool is_load = !!to;
37 
38 	/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
39 	if (kvmhv_on_pseries())
40 		return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
41 					  (to != NULL) ? __pa(to): 0,
42 					  (from != NULL) ? __pa(from): 0, n);
43 
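	/*
	 * Use the EA quadrant mechanism: with the guest's LPID (and, for
	 * quadrant 1, its PID) loaded into the SPRs, quadrant 1 effective
	 * addresses are translated via the guest's process-scoped tables,
	 * while quadrant 2 is used when pid == 0.
	 */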
44 	quadrant = 1;
45 	if (!pid)
46 		quadrant = 2;
47 	if (is_load)
48 		from = (void *) (eaddr | (quadrant << 62));
49 	else
50 		to = (void *) (eaddr | (quadrant << 62));
51 
52 	preempt_disable();
53 
54 	/* switch the lpid first to avoid running host with unallocated pid */
55 	old_lpid = mfspr(SPRN_LPID);
56 	if (old_lpid != lpid)
57 		mtspr(SPRN_LPID, lpid);
58 	if (quadrant == 1) {
59 		old_pid = mfspr(SPRN_PID);
60 		if (old_pid != pid)
61 			mtspr(SPRN_PID, pid);
62 	}
63 	isync();
64 
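	/*
	 * Do the access as an ordinary user copy through the quadrant
	 * mapping. Page faults are disabled, so an unmapped guest address
	 * makes the copy return the number of bytes left uncopied rather
	 * than faulting pages in.
	 */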
65 	pagefault_disable();
66 	if (is_load)
67 		ret = raw_copy_from_user(to, from, n);
68 	else
69 		ret = raw_copy_to_user(to, from, n);
70 	pagefault_enable();
71 
72 	/* switch the pid first to avoid running host with unallocated pid */
73 	if (quadrant == 1 && pid != old_pid)
74 		mtspr(SPRN_PID, old_pid);
75 	if (lpid != old_lpid)
76 		mtspr(SPRN_LPID, old_lpid);
77 	isync();
78 
79 	preempt_enable();
80 
81 	return ret;
82 }
83 EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix);
84 
85 static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
86 					  void *to, void *from, unsigned long n)
87 {
88 	int lpid = vcpu->kvm->arch.lpid;
89 	int pid = vcpu->arch.pid;
90 
91 	/* This would cause a data segment intr so don't allow the access */
92 	if (eaddr & (0x3FFUL << 52))
93 		return -EINVAL;
94 
95 	/* Should we be using the nested lpid */
96 	if (vcpu->arch.nested)
97 		lpid = vcpu->arch.nested->shadow_lpid;
98 
99 	/* If accessing quadrant 3 then pid is expected to be 0 */
100 	if (((eaddr >> 62) & 0x3) == 0x3)
101 		pid = 0;
102 
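	/* Strip the top (quadrant) bits so only the 52-bit address remains */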
103 	eaddr &= ~(0xFFFUL << 52);
104 
105 	return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n);
106 }
107 
108 long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
109 				 unsigned long n)
110 {
111 	long ret;
112 
113 	ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n);
114 	if (ret > 0)
115 		memset(to + (n - ret), 0, ret);
116 
117 	return ret;
118 }
119 EXPORT_SYMBOL_GPL(kvmhv_copy_from_guest_radix);
120 
121 long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
122 			       unsigned long n)
123 {
124 	return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
125 }
126 EXPORT_SYMBOL_GPL(kvmhv_copy_to_guest_radix);
127 
128 int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
129 			       struct kvmppc_pte *gpte, u64 root,
130 			       u64 *pte_ret_p)
131 {
132 	struct kvm *kvm = vcpu->kvm;
133 	int ret, level, ps;
134 	unsigned long rts, bits, offset, index;
135 	u64 pte, base, gpa;
136 	__be64 rpte;
137 
138 	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
139 		((root & RTS2_MASK) >> RTS2_SHIFT);
140 	bits = root & RPDS_MASK;
141 	base = root & RPDB_MASK;
142 
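	/* The RTS field encodes the total address space size minus 31 bits */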
143 	offset = rts + 31;
144 
145 	/* Current implementations only support 52-bit space */
146 	if (offset != 52)
147 		return -EINVAL;
148 
149 	/* Walk each level of the radix tree */
150 	for (level = 3; level >= 0; --level) {
151 		u64 addr;
152 		/* Check a valid size */
153 		if (level && bits != p9_supported_radix_bits[level])
154 			return -EINVAL;
155 		if (level == 0 && !(bits == 5 || bits == 9))
156 			return -EINVAL;
157 		offset -= bits;
158 		index = (eaddr >> offset) & ((1UL << bits) - 1);
159 		/* Check that low bits of page table base are zero */
160 		if (base & ((1UL << (bits + 3)) - 1))
161 			return -EINVAL;
162 		/* Read the entry from guest memory */
163 		addr = base + (index * sizeof(rpte));
164 		ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
165 		if (ret) {
166 			if (pte_ret_p)
167 				*pte_ret_p = addr;
168 			return ret;
169 		}
170 		pte = __be64_to_cpu(rpte);
171 		if (!(pte & _PAGE_PRESENT))
172 			return -ENOENT;
173 		/* Check if a leaf entry */
174 		if (pte & _PAGE_PTE)
175 			break;
176 		/* Get ready to walk the next level */
177 		base = pte & RPDB_MASK;
178 		bits = pte & RPDS_MASK;
179 	}
180 
181 	/* Need a leaf at lowest level; 512GB pages not supported */
182 	if (level < 0 || level == 3)
183 		return -EINVAL;
184 
185 	/* We found a valid leaf PTE */
186 	/* Offset is now log base 2 of the page size */
187 	gpa = pte & 0x01fffffffffff000ul;
188 	if (gpa & ((1ul << offset) - 1))
189 		return -EINVAL;
190 	gpa |= eaddr & ((1ul << offset) - 1);
191 	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
192 		if (offset == mmu_psize_defs[ps].shift)
193 			break;
194 	gpte->page_size = ps;
195 	gpte->page_shift = offset;
196 
197 	gpte->eaddr = eaddr;
198 	gpte->raddr = gpa;
199 
200 	/* Work out permissions */
201 	gpte->may_read = !!(pte & _PAGE_READ);
202 	gpte->may_write = !!(pte & _PAGE_WRITE);
203 	gpte->may_execute = !!(pte & _PAGE_EXEC);
204 
205 	gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);
206 
207 	if (pte_ret_p)
208 		*pte_ret_p = pte;
209 
210 	return 0;
211 }
212 
213 /*
214  * Used to walk a partition or process table radix tree in guest memory
215  * Note: We exploit the fact that a partition table and a process
216  * table have the same layout, a partition-scoped page table and a
217  * process-scoped page table have the same layout, and the 2nd
218  * doubleword of a partition table entry has the same layout as
219  * the PTCR register.
220  */
221 int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
222 				     struct kvmppc_pte *gpte, u64 table,
223 				     int table_index, u64 *pte_ret_p)
224 {
225 	struct kvm *kvm = vcpu->kvm;
226 	int ret;
227 	unsigned long size, ptbl, root;
228 	struct prtb_entry entry;
229 
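	/* The table occupies 2^(PRTS + 12) bytes; PRTS values above 24 are not supported */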
230 	if ((table & PRTS_MASK) > 24)
231 		return -EINVAL;
232 	size = 1ul << ((table & PRTS_MASK) + 12);
233 
234 	/* Is the table big enough to contain this entry? */
235 	if ((table_index * sizeof(entry)) >= size)
236 		return -EINVAL;
237 
238 	/* Read the table to find the root of the radix tree */
239 	ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
240 	ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
241 	if (ret)
242 		return ret;
243 
244 	/* Root is stored in the first double word */
245 	root = be64_to_cpu(entry.prtb0);
246 
247 	return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
248 }
249 
250 int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
251 			   struct kvmppc_pte *gpte, bool data, bool iswrite)
252 {
253 	u32 pid;
254 	u64 pte;
255 	int ret;
256 
257 	/* Work out effective PID */
258 	switch (eaddr >> 62) {
259 	case 0:
260 		pid = vcpu->arch.pid;
261 		break;
262 	case 3:
263 		pid = 0;
264 		break;
265 	default:
266 		return -EINVAL;
267 	}
268 
269 	ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
270 				vcpu->kvm->arch.process_table, pid, &pte);
271 	if (ret)
272 		return ret;
273 
274 	/* Check privilege (applies only to process scoped translations) */
275 	if (kvmppc_get_msr(vcpu) & MSR_PR) {
276 		if (pte & _PAGE_PRIVILEGED) {
277 			gpte->may_read = 0;
278 			gpte->may_write = 0;
279 			gpte->may_execute = 0;
280 		}
281 	} else {
282 		if (!(pte & _PAGE_PRIVILEGED)) {
283 			/* Check AMR/IAMR to see if strict mode is in force */
284 			if (vcpu->arch.amr & (1ul << 62))
285 				gpte->may_read = 0;
286 			if (vcpu->arch.amr & (1ul << 63))
287 				gpte->may_write = 0;
288 			if (vcpu->arch.iamr & (1ul << 62))
289 				gpte->may_execute = 0;
290 		}
291 	}
292 
293 	return 0;
294 }
295 
296 void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
297 			     unsigned int pshift, unsigned int lpid)
298 {
299 	unsigned long psize = PAGE_SIZE;
300 	int psi;
301 	long rc;
302 	unsigned long rb;
303 
304 	if (pshift)
305 		psize = 1UL << pshift;
306 	else
307 		pshift = PAGE_SHIFT;
308 
309 	addr &= ~(psize - 1);
310 
311 	if (!kvmhv_on_pseries()) {
312 		radix__flush_tlb_lpid_page(lpid, addr, psize);
313 		return;
314 	}
315 
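	/*
	 * Running under another hypervisor: ask it to do the invalidation
	 * via H_TLB_INVALIDATE, encoding the page size (AP) into rb.
	 */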
316 	psi = shift_to_mmu_psize(pshift);
317 	rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
318 	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
319 				lpid, rb);
320 	if (rc)
321 		pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
322 }
323 
324 static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
325 {
326 	long rc;
327 
328 	if (!kvmhv_on_pseries()) {
329 		radix__flush_pwc_lpid(lpid);
330 		return;
331 	}
332 
333 	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
334 				lpid, TLBIEL_INVAL_SET_LPID);
335 	if (rc)
336 		pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
337 }
338 
339 static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
340 				      unsigned long clr, unsigned long set,
341 				      unsigned long addr, unsigned int shift)
342 {
343 	return __radix_pte_update(ptep, clr, set);
344 }
345 
346 void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
347 			     pte_t *ptep, pte_t pte)
348 {
349 	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
350 }
351 
352 static struct kmem_cache *kvm_pte_cache;
353 static struct kmem_cache *kvm_pmd_cache;
354 
355 static pte_t *kvmppc_pte_alloc(void)
356 {
357 	pte_t *pte;
358 
359 	pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
360 	/* pmd_populate() will only reference _pa(pte). */
361 	kmemleak_ignore(pte);
362 
363 	return pte;
364 }
365 
366 static void kvmppc_pte_free(pte_t *ptep)
367 {
368 	kmem_cache_free(kvm_pte_cache, ptep);
369 }
370 
371 static pmd_t *kvmppc_pmd_alloc(void)
372 {
373 	pmd_t *pmd;
374 
375 	pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
376 	/* pud_populate() will only reference _pa(pmd). */
377 	kmemleak_ignore(pmd);
378 
379 	return pmd;
380 }
381 
382 static void kvmppc_pmd_free(pmd_t *pmdp)
383 {
384 	kmem_cache_free(kvm_pmd_cache, pmdp);
385 }
386 
387 /* Called with kvm->mmu_lock held */
388 void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
389 		      unsigned int shift,
390 		      const struct kvm_memory_slot *memslot,
391 		      unsigned int lpid)
392 
393 {
394 	unsigned long old;
395 	unsigned long gfn = gpa >> PAGE_SHIFT;
396 	unsigned long page_size = PAGE_SIZE;
397 	unsigned long hpa;
398 
399 	old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
400 	kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
401 
402 	/* The following only applies to L1 entries */
403 	if (lpid != kvm->arch.lpid)
404 		return;
405 
406 	if (!memslot) {
407 		memslot = gfn_to_memslot(kvm, gfn);
408 		if (!memslot)
409 			return;
410 	}
411 	if (shift) { /* 1GB or 2MB page */
412 		page_size = 1ul << shift;
413 		if (shift == PMD_SHIFT)
414 			kvm->stat.num_2M_pages--;
415 		else if (shift == PUD_SHIFT)
416 			kvm->stat.num_1G_pages--;
417 	}
418 
419 	gpa &= ~(page_size - 1);
420 	hpa = old & PTE_RPN_MASK;
421 	kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);
422 
423 	if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
424 		kvmppc_update_dirty_map(memslot, gfn, page_size);
425 }
426 
427 /*
428  * kvmppc_free_p?d are used to free existing page tables, and recursively
429  * descend and clear and free children.
430  * Callers are responsible for flushing the PWC.
431  *
432  * When page tables are being unmapped/freed as part of the page fault path
433  * (full == false), ptes are not expected. There is code to unmap them
434  * and emit a warning if encountered, but there may already be data
435  * corruption due to the unexpected mappings.
436  */
437 static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
438 				  unsigned int lpid)
439 {
440 	if (full) {
441 		memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
442 	} else {
443 		pte_t *p = pte;
444 		unsigned long it;
445 
446 		for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
447 			if (pte_val(*p) == 0)
448 				continue;
449 			WARN_ON_ONCE(1);
450 			kvmppc_unmap_pte(kvm, p,
451 					 pte_pfn(*p) << PAGE_SHIFT,
452 					 PAGE_SHIFT, NULL, lpid);
453 		}
454 	}
455 
456 	kvmppc_pte_free(pte);
457 }
458 
459 static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
460 				  unsigned int lpid)
461 {
462 	unsigned long im;
463 	pmd_t *p = pmd;
464 
465 	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
466 		if (!pmd_present(*p))
467 			continue;
468 		if (pmd_is_leaf(*p)) {
469 			if (full) {
470 				pmd_clear(p);
471 			} else {
472 				WARN_ON_ONCE(1);
473 				kvmppc_unmap_pte(kvm, (pte_t *)p,
474 					 pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
475 					 PMD_SHIFT, NULL, lpid);
476 			}
477 		} else {
478 			pte_t *pte;
479 
480 			pte = pte_offset_map(p, 0);
481 			kvmppc_unmap_free_pte(kvm, pte, full, lpid);
482 			pmd_clear(p);
483 		}
484 	}
485 	kvmppc_pmd_free(pmd);
486 }
487 
488 static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
489 				  unsigned int lpid)
490 {
491 	unsigned long iu;
492 	pud_t *p = pud;
493 
494 	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
495 		if (!pud_present(*p))
496 			continue;
497 		if (pud_is_leaf(*p)) {
498 			pud_clear(p);
499 		} else {
500 			pmd_t *pmd;
501 
502 			pmd = pmd_offset(p, 0);
503 			kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
504 			pud_clear(p);
505 		}
506 	}
507 	pud_free(kvm->mm, pud);
508 }
509 
510 void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
511 {
512 	unsigned long ig;
513 
514 	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
515 		pud_t *pud;
516 
517 		if (!pgd_present(*pgd))
518 			continue;
519 		pud = pud_offset(pgd, 0);
520 		kvmppc_unmap_free_pud(kvm, pud, lpid);
521 		pgd_clear(pgd);
522 	}
523 }
524 
525 void kvmppc_free_radix(struct kvm *kvm)
526 {
527 	if (kvm->arch.pgtable) {
528 		kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
529 					  kvm->arch.lpid);
530 		pgd_free(kvm->mm, kvm->arch.pgtable);
531 		kvm->arch.pgtable = NULL;
532 	}
533 }
534 
535 static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
536 					unsigned long gpa, unsigned int lpid)
537 {
538 	pte_t *pte = pte_offset_kernel(pmd, 0);
539 
540 	/*
541 	 * Clearing the pmd entry then flushing the PWC ensures that the pte
542 	 * page will no longer be cached by the MMU, so it can be freed without
543 	 * flushing the PWC again.
544 	 */
545 	pmd_clear(pmd);
546 	kvmppc_radix_flush_pwc(kvm, lpid);
547 
548 	kvmppc_unmap_free_pte(kvm, pte, false, lpid);
549 }
550 
551 static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
552 					unsigned long gpa, unsigned int lpid)
553 {
554 	pmd_t *pmd = pmd_offset(pud, 0);
555 
556 	/*
557 	 * Clearing the pud entry then flushing the PWC ensures that the pmd
558 	 * page and any children pte pages will no longer be cached by the MMU,
559 	 * so they can be freed without flushing the PWC again.
560 	 */
561 	pud_clear(pud);
562 	kvmppc_radix_flush_pwc(kvm, lpid);
563 
564 	kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
565 }
566 
567 /*
568  * A number of bits can differ between different faults to the same
569  * partition-scoped entry: the RC bits, in the course of cleaning and
570  * aging, and the write bit, which can change either because the access
571  * was upgraded or because a read fault raced with a write fault that
572  * set those bits first.
573  */
574 #define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
575 
576 int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
577 		      unsigned long gpa, unsigned int level,
578 		      unsigned long mmu_seq, unsigned int lpid,
579 		      unsigned long *rmapp, struct rmap_nested **n_rmap)
580 {
581 	pgd_t *pgd;
582 	pud_t *pud, *new_pud = NULL;
583 	pmd_t *pmd, *new_pmd = NULL;
584 	pte_t *ptep, *new_ptep = NULL;
585 	int ret;
586 
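	/*
	 * Do any page-table allocations before taking the mmu_lock, since
	 * GFP_KERNEL allocations may sleep.
	 */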
587 	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
588 	pgd = pgtable + pgd_index(gpa);
589 	pud = NULL;
590 	if (pgd_present(*pgd))
591 		pud = pud_offset(pgd, gpa);
592 	else
593 		new_pud = pud_alloc_one(kvm->mm, gpa);
594 
595 	pmd = NULL;
596 	if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
597 		pmd = pmd_offset(pud, gpa);
598 	else if (level <= 1)
599 		new_pmd = kvmppc_pmd_alloc();
600 
601 	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
602 		new_ptep = kvmppc_pte_alloc();
603 
604 	/* Check if we might have been invalidated; let the guest retry if so */
605 	spin_lock(&kvm->mmu_lock);
606 	ret = -EAGAIN;
607 	if (mmu_notifier_retry(kvm, mmu_seq))
608 		goto out_unlock;
609 
610 	/* Now traverse again under the lock and change the tree */
611 	ret = -ENOMEM;
612 	if (pgd_none(*pgd)) {
613 		if (!new_pud)
614 			goto out_unlock;
615 		pgd_populate(kvm->mm, pgd, new_pud);
616 		new_pud = NULL;
617 	}
618 	pud = pud_offset(pgd, gpa);
619 	if (pud_is_leaf(*pud)) {
620 		unsigned long hgpa = gpa & PUD_MASK;
621 
622 		/* Check if we raced and someone else has set the same thing */
623 		if (level == 2) {
624 			if (pud_raw(*pud) == pte_raw(pte)) {
625 				ret = 0;
626 				goto out_unlock;
627 			}
628 			/* Valid 1GB page here already, add our extra bits */
629 			WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
630 							PTE_BITS_MUST_MATCH);
631 			kvmppc_radix_update_pte(kvm, (pte_t *)pud,
632 					      0, pte_val(pte), hgpa, PUD_SHIFT);
633 			ret = 0;
634 			goto out_unlock;
635 		}
636 		/*
637 		 * If we raced with another CPU which has just put
638 		 * a 1GB pte in after we saw a pmd page, try again.
639 		 */
640 		if (!new_pmd) {
641 			ret = -EAGAIN;
642 			goto out_unlock;
643 		}
644 		/* Valid 1GB page here already, remove it */
645 		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
646 				 lpid);
647 	}
648 	if (level == 2) {
649 		if (!pud_none(*pud)) {
650 			/*
651 			 * There's a page table page here, but we wanted to
652 			 * install a large page, so remove and free the page
653 			 * table page.
654 			 */
655 			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
656 		}
657 		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
658 		if (rmapp && n_rmap)
659 			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
660 		ret = 0;
661 		goto out_unlock;
662 	}
663 	if (pud_none(*pud)) {
664 		if (!new_pmd)
665 			goto out_unlock;
666 		pud_populate(kvm->mm, pud, new_pmd);
667 		new_pmd = NULL;
668 	}
669 	pmd = pmd_offset(pud, gpa);
670 	if (pmd_is_leaf(*pmd)) {
671 		unsigned long lgpa = gpa & PMD_MASK;
672 
673 		/* Check if we raced and someone else has set the same thing */
674 		if (level == 1) {
675 			if (pmd_raw(*pmd) == pte_raw(pte)) {
676 				ret = 0;
677 				goto out_unlock;
678 			}
679 			/* Valid 2MB page here already, add our extra bits */
680 			WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
681 							PTE_BITS_MUST_MATCH);
682 			kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
683 					0, pte_val(pte), lgpa, PMD_SHIFT);
684 			ret = 0;
685 			goto out_unlock;
686 		}
687 
688 		/*
689 		 * If we raced with another CPU which has just put
690 		 * a 2MB pte in after we saw a pte page, try again.
691 		 */
692 		if (!new_ptep) {
693 			ret = -EAGAIN;
694 			goto out_unlock;
695 		}
696 		/* Valid 2MB page here already, remove it */
697 		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
698 				 lpid);
699 	}
700 	if (level == 1) {
701 		if (!pmd_none(*pmd)) {
702 			/*
703 			 * There's a page table page here, but we wanted to
704 			 * install a large page, so remove and free the page
705 			 * table page.
706 			 */
707 			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
708 		}
709 		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
710 		if (rmapp && n_rmap)
711 			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
712 		ret = 0;
713 		goto out_unlock;
714 	}
715 	if (pmd_none(*pmd)) {
716 		if (!new_ptep)
717 			goto out_unlock;
718 		pmd_populate(kvm->mm, pmd, new_ptep);
719 		new_ptep = NULL;
720 	}
721 	ptep = pte_offset_kernel(pmd, gpa);
722 	if (pte_present(*ptep)) {
723 		/* Check if someone else set the same thing */
724 		if (pte_raw(*ptep) == pte_raw(pte)) {
725 			ret = 0;
726 			goto out_unlock;
727 		}
728 		/* Valid page here already, add our extra bits */
729 		WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
730 							PTE_BITS_MUST_MATCH);
731 		kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
732 		ret = 0;
733 		goto out_unlock;
734 	}
735 	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
736 	if (rmapp && n_rmap)
737 		kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
738 	ret = 0;
739 
740  out_unlock:
741 	spin_unlock(&kvm->mmu_lock);
742 	if (new_pud)
743 		pud_free(kvm->mm, new_pud);
744 	if (new_pmd)
745 		kvmppc_pmd_free(new_pmd);
746 	if (new_ptep)
747 		kvmppc_pte_free(new_ptep);
748 	return ret;
749 }
750 
751 bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing,
752 			     unsigned long gpa, unsigned int lpid)
753 {
754 	unsigned long pgflags;
755 	unsigned int shift;
756 	pte_t *ptep;
757 
758 	/*
759 	 * Need to set an R or C bit in the 2nd-level tables;
760 	 * since we are just helping out the hardware here,
761 	 * it is sufficient to do what the hardware does.
762 	 */
763 	pgflags = _PAGE_ACCESSED;
764 	if (writing)
765 		pgflags |= _PAGE_DIRTY;
766 	/*
767 	 * We are walking the secondary (partition-scoped) page table here.
768 	 * We can do this without disabling irq because the Linux MM
769 	 * subsystem doesn't do THP splits and collapses on this tree.
770 	 */
771 	ptep = __find_linux_pte(pgtable, gpa, NULL, &shift);
772 	if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
773 		kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
774 		return true;
775 	}
776 	return false;
777 }
778 
779 int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
780 				   unsigned long gpa,
781 				   struct kvm_memory_slot *memslot,
782 				   bool writing, bool kvm_ro,
783 				   pte_t *inserted_pte, unsigned int *levelp)
784 {
785 	struct kvm *kvm = vcpu->kvm;
786 	struct page *page = NULL;
787 	unsigned long mmu_seq;
788 	unsigned long hva, gfn = gpa >> PAGE_SHIFT;
789 	bool upgrade_write = false;
790 	bool *upgrade_p = &upgrade_write;
791 	pte_t pte, *ptep;
792 	unsigned int shift, level;
793 	int ret;
794 	bool large_enable;
795 
796 	/* used to check for invalidations in progress */
797 	mmu_seq = kvm->mmu_notifier_seq;
798 	smp_rmb();
799 
800 	/*
801 	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
802 	 * do it with !atomic && !async, which is how we call it.
803 	 * We always ask for write permission since the common case
804 	 * is that the page is writable.
805 	 */
806 	hva = gfn_to_hva_memslot(memslot, gfn);
807 	if (!kvm_ro && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
808 		upgrade_write = true;
809 	} else {
810 		unsigned long pfn;
811 
812 		/* Call KVM generic code to do the slow-path check */
813 		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
814 					   writing, upgrade_p);
815 		if (is_error_noslot_pfn(pfn))
816 			return -EFAULT;
817 		page = NULL;
818 		if (pfn_valid(pfn)) {
819 			page = pfn_to_page(pfn);
820 			if (PageReserved(page))
821 				page = NULL;
822 		}
823 	}
824 
825 	/*
826 	 * Read the PTE from the process' radix tree and use that
827 	 * so we get the shift and attribute bits.
828 	 */
829 	local_irq_disable();
830 	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
831 	/*
832 	 * If the PTE disappeared temporarily due to a THP
833 	 * collapse, just return and let the guest try again.
834 	 */
835 	if (!ptep) {
836 		local_irq_enable();
837 		if (page)
838 			put_page(page);
839 		return RESUME_GUEST;
840 	}
841 	pte = *ptep;
842 	local_irq_enable();
843 
844 	/* If we're logging dirty pages, always map single pages */
845 	large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);
846 
847 	/* Get pte level from shift/size */
848 	if (large_enable && shift == PUD_SHIFT &&
849 	    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
850 	    (hva & (PUD_SIZE - PAGE_SIZE))) {
851 		level = 2;
852 	} else if (large_enable && shift == PMD_SHIFT &&
853 		   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
854 		   (hva & (PMD_SIZE - PAGE_SIZE))) {
855 		level = 1;
856 	} else {
857 		level = 0;
858 		if (shift > PAGE_SHIFT) {
859 			/*
860 			 * If the pte maps more than one page, bring over
861 			 * bits from the virtual address to get the real
862 			 * address of the specific single page we want.
863 			 */
864 			unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
865 			pte = __pte(pte_val(pte) | (hva & rpnmask));
866 		}
867 	}
868 
869 	pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
870 	if (writing || upgrade_write) {
871 		if (pte_val(pte) & _PAGE_WRITE)
872 			pte = __pte(pte_val(pte) | _PAGE_DIRTY);
873 	} else {
874 		pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
875 	}
876 
877 	/* Allocate space in the tree and write the PTE */
878 	ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
879 				mmu_seq, kvm->arch.lpid, NULL, NULL);
880 	if (inserted_pte)
881 		*inserted_pte = pte;
882 	if (levelp)
883 		*levelp = level;
884 
885 	if (page) {
886 		if (!ret && (pte_val(pte) & _PAGE_WRITE))
887 			set_page_dirty_lock(page);
888 		put_page(page);
889 	}
890 
891 	/* Increment number of large pages if we (successfully) inserted one */
892 	if (!ret) {
893 		if (level == 1)
894 			kvm->stat.num_2M_pages++;
895 		else if (level == 2)
896 			kvm->stat.num_1G_pages++;
897 	}
898 
899 	return ret;
900 }
901 
902 int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
903 				   unsigned long ea, unsigned long dsisr)
904 {
905 	struct kvm *kvm = vcpu->kvm;
906 	unsigned long gpa, gfn;
907 	struct kvm_memory_slot *memslot;
908 	long ret;
909 	bool writing = !!(dsisr & DSISR_ISSTORE);
910 	bool kvm_ro = false;
911 
912 	/* Check for unusual errors */
913 	if (dsisr & DSISR_UNSUPP_MMU) {
914 		pr_err("KVM: Got unsupported MMU fault\n");
915 		return -EFAULT;
916 	}
917 	if (dsisr & DSISR_BADACCESS) {
918 		/* Reflect to the guest as DSI */
919 		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
920 		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
921 		return RESUME_GUEST;
922 	}
923 
924 	/* Translate the logical address */
925 	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
926 	gpa &= ~0xF000000000000000ul;
927 	gfn = gpa >> PAGE_SHIFT;
928 	if (!(dsisr & DSISR_PRTABLE_FAULT))
929 		gpa |= ea & 0xfff;
930 
931 	/* Get the corresponding memslot */
932 	memslot = gfn_to_memslot(kvm, gfn);
933 
934 	/* No memslot means it's an emulated MMIO region */
935 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
936 		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
937 			     DSISR_SET_RC)) {
938 			/*
939 			 * Bad address in guest page table tree, or other
940 			 * unusual error - reflect it to the guest as DSI.
941 			 */
942 			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
943 			return RESUME_GUEST;
944 		}
945 		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
946 	}
947 
948 	if (memslot->flags & KVM_MEM_READONLY) {
949 		if (writing) {
950 			/* give the guest a DSI */
951 			kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
952 						       DSISR_PROTFAULT);
953 			return RESUME_GUEST;
954 		}
955 		kvm_ro = true;
956 	}
957 
958 	/* Failed to set the reference/change bits */
959 	if (dsisr & DSISR_SET_RC) {
960 		spin_lock(&kvm->mmu_lock);
961 		if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable,
962 					    writing, gpa, kvm->arch.lpid))
963 			dsisr &= ~DSISR_SET_RC;
964 		spin_unlock(&kvm->mmu_lock);
965 
966 		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
967 			       DSISR_PROTFAULT | DSISR_SET_RC)))
968 			return RESUME_GUEST;
969 	}
970 
971 	/* Try to insert a pte */
972 	ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
973 					     kvm_ro, NULL, NULL);
974 
975 	if (ret == 0 || ret == -EAGAIN)
976 		ret = RESUME_GUEST;
977 	return ret;
978 }
979 
980 /* Called with kvm->mmu_lock held */
981 int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
982 		    unsigned long gfn)
983 {
984 	pte_t *ptep;
985 	unsigned long gpa = gfn << PAGE_SHIFT;
986 	unsigned int shift;
987 
988 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
989 	if (ptep && pte_present(*ptep))
990 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
991 				 kvm->arch.lpid);
992 	return 0;
993 }
994 
995 /* Called with kvm->mmu_lock held */
996 int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
997 		  unsigned long gfn)
998 {
999 	pte_t *ptep;
1000 	unsigned long gpa = gfn << PAGE_SHIFT;
1001 	unsigned int shift;
1002 	int ref = 0;
1003 	unsigned long old, *rmapp;
1004 
1005 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
1006 	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
1007 		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
1008 					      gpa, shift);
1009 		/* XXX need to flush tlb here? */
1010 		/* Also clear bit in ptes in shadow pgtable for nested guests */
1011 		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
1012 		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
1013 					       old & PTE_RPN_MASK,
1014 					       1UL << shift);
1015 		ref = 1;
1016 	}
1017 	return ref;
1018 }
1019 
1020 /* Called with kvm->mmu_lock held */
1021 int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
1022 		       unsigned long gfn)
1023 {
1024 	pte_t *ptep;
1025 	unsigned long gpa = gfn << PAGE_SHIFT;
1026 	unsigned int shift;
1027 	int ref = 0;
1028 
1029 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
1030 	if (ptep && pte_present(*ptep) && pte_young(*ptep))
1031 		ref = 1;
1032 	return ref;
1033 }
1034 
1035 /* Returns the number of PAGE_SIZE pages that are dirty */
1036 static int kvm_radix_test_clear_dirty(struct kvm *kvm,
1037 				struct kvm_memory_slot *memslot, int pagenum)
1038 {
1039 	unsigned long gfn = memslot->base_gfn + pagenum;
1040 	unsigned long gpa = gfn << PAGE_SHIFT;
1041 	pte_t *ptep;
1042 	unsigned int shift;
1043 	int ret = 0;
1044 	unsigned long old, *rmapp;
1045 
1046 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
1047 	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
1048 		ret = 1;
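		/* A large page covers 2^(shift - PAGE_SHIFT) small pages */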
1049 		if (shift)
1050 			ret = 1 << (shift - PAGE_SHIFT);
1051 		spin_lock(&kvm->mmu_lock);
1052 		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
1053 					      gpa, shift);
1054 		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
1055 		/* Also clear bit in ptes in shadow pgtable for nested guests */
1056 		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
1057 		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0,
1058 					       old & PTE_RPN_MASK,
1059 					       1UL << shift);
1060 		spin_unlock(&kvm->mmu_lock);
1061 	}
1062 	return ret;
1063 }
1064 
1065 long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
1066 			struct kvm_memory_slot *memslot, unsigned long *map)
1067 {
1068 	unsigned long i, j;
1069 	int npages;
1070 
1071 	for (i = 0; i < memslot->npages; i = j) {
1072 		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);
1073 
1074 		/*
1075 		 * Note that if npages > 0 then i must be a multiple of npages,
1076 		 * since huge pages are only used to back the guest at guest
1077 		 * real addresses that are a multiple of their size.
1078 		 * Since we have at most one PTE covering any given guest
1079 		 * real address, if npages > 1 we can skip to i + npages.
1080 		 */
1081 		j = i + 1;
1082 		if (npages) {
1083 			set_dirty_bits(map, i, npages);
1084 			j = i + npages;
1085 		}
1086 	}
1087 	return 0;
1088 }
1089 
1090 void kvmppc_radix_flush_memslot(struct kvm *kvm,
1091 				const struct kvm_memory_slot *memslot)
1092 {
1093 	unsigned long n;
1094 	pte_t *ptep;
1095 	unsigned long gpa;
1096 	unsigned int shift;
1097 
1098 	gpa = memslot->base_gfn << PAGE_SHIFT;
1099 	spin_lock(&kvm->mmu_lock);
1100 	for (n = memslot->npages; n; --n) {
1101 		ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
1102 		if (ptep && pte_present(*ptep))
1103 			kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
1104 					 kvm->arch.lpid);
1105 		gpa += PAGE_SIZE;
1106 	}
1107 	/*
1108 	 * Increase the mmu notifier sequence number to prevent any page
1109 	 * fault that read the memslot earlier from writing a PTE.
1110 	 */
1111 	kvm->mmu_notifier_seq++;
1112 	spin_unlock(&kvm->mmu_lock);
1113 }
1114 
1115 static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
1116 				 int psize, int *indexp)
1117 {
1118 	if (!mmu_psize_defs[psize].shift)
1119 		return;
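	/* Pack the AP value (shifted up by 29 bits) together with the base page shift */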
1120 	info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
1121 		(mmu_psize_defs[psize].ap << 29);
1122 	++(*indexp);
1123 }
1124 
1125 int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
1126 {
1127 	int i;
1128 
1129 	if (!radix_enabled())
1130 		return -EINVAL;
1131 	memset(info, 0, sizeof(*info));
1132 
1133 	/* 4k page size */
1134 	info->geometries[0].page_shift = 12;
1135 	info->geometries[0].level_bits[0] = 9;
1136 	for (i = 1; i < 4; ++i)
1137 		info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
1138 	/* 64k page size */
1139 	info->geometries[1].page_shift = 16;
1140 	for (i = 0; i < 4; ++i)
1141 		info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];
1142 
1143 	i = 0;
1144 	add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
1145 	add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
1146 	add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
1147 	add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);
1148 
1149 	return 0;
1150 }
1151 
1152 int kvmppc_init_vm_radix(struct kvm *kvm)
1153 {
1154 	kvm->arch.pgtable = pgd_alloc(kvm->mm);
1155 	if (!kvm->arch.pgtable)
1156 		return -ENOMEM;
1157 	return 0;
1158 }
1159 
1160 static void pte_ctor(void *addr)
1161 {
1162 	memset(addr, 0, RADIX_PTE_TABLE_SIZE);
1163 }
1164 
1165 static void pmd_ctor(void *addr)
1166 {
1167 	memset(addr, 0, RADIX_PMD_TABLE_SIZE);
1168 }
1169 
1170 struct debugfs_radix_state {
1171 	struct kvm	*kvm;
1172 	struct mutex	mutex;
1173 	unsigned long	gpa;
1174 	int		lpid;
1175 	int		chars_left;
1176 	int		buf_index;
1177 	char		buf[128];
1178 	u8		hdr;
1179 };
1180 
1181 static int debugfs_radix_open(struct inode *inode, struct file *file)
1182 {
1183 	struct kvm *kvm = inode->i_private;
1184 	struct debugfs_radix_state *p;
1185 
1186 	p = kzalloc(sizeof(*p), GFP_KERNEL);
1187 	if (!p)
1188 		return -ENOMEM;
1189 
1190 	kvm_get_kvm(kvm);
1191 	p->kvm = kvm;
1192 	mutex_init(&p->mutex);
1193 	file->private_data = p;
1194 
1195 	return nonseekable_open(inode, file);
1196 }
1197 
1198 static int debugfs_radix_release(struct inode *inode, struct file *file)
1199 {
1200 	struct debugfs_radix_state *p = file->private_data;
1201 
1202 	kvm_put_kvm(p->kvm);
1203 	kfree(p);
1204 	return 0;
1205 }
1206 
1207 static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
1208 				 size_t len, loff_t *ppos)
1209 {
1210 	struct debugfs_radix_state *p = file->private_data;
1211 	ssize_t ret, r;
1212 	unsigned long n;
1213 	struct kvm *kvm;
1214 	unsigned long gpa;
1215 	pgd_t *pgt;
1216 	struct kvm_nested_guest *nested;
1217 	pgd_t pgd, *pgdp;
1218 	pud_t pud, *pudp;
1219 	pmd_t pmd, *pmdp;
1220 	pte_t *ptep;
1221 	int shift;
1222 	unsigned long pte;
1223 
1224 	kvm = p->kvm;
1225 	if (!kvm_is_radix(kvm))
1226 		return 0;
1227 
1228 	ret = mutex_lock_interruptible(&p->mutex);
1229 	if (ret)
1230 		return ret;
1231 
1232 	if (p->chars_left) {
1233 		n = p->chars_left;
1234 		if (n > len)
1235 			n = len;
1236 		r = copy_to_user(buf, p->buf + p->buf_index, n);
1237 		n -= r;
1238 		p->chars_left -= n;
1239 		p->buf_index += n;
1240 		buf += n;
1241 		len -= n;
1242 		ret = n;
1243 		if (r) {
1244 			if (!n)
1245 				ret = -EFAULT;
1246 			goto out;
1247 		}
1248 	}
1249 
1250 	gpa = p->gpa;
1251 	nested = NULL;
1252 	pgt = NULL;
1253 	while (len != 0 && p->lpid >= 0) {
1254 		if (gpa >= RADIX_PGTABLE_RANGE) {
1255 			gpa = 0;
1256 			pgt = NULL;
1257 			if (nested) {
1258 				kvmhv_put_nested(nested);
1259 				nested = NULL;
1260 			}
1261 			p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
1262 			p->hdr = 0;
1263 			if (p->lpid < 0)
1264 				break;
1265 		}
1266 		if (!pgt) {
1267 			if (p->lpid == 0) {
1268 				pgt = kvm->arch.pgtable;
1269 			} else {
1270 				nested = kvmhv_get_nested(kvm, p->lpid, false);
1271 				if (!nested) {
1272 					gpa = RADIX_PGTABLE_RANGE;
1273 					continue;
1274 				}
1275 				pgt = nested->shadow_pgtable;
1276 			}
1277 		}
1278 		n = 0;
1279 		if (!p->hdr) {
1280 			if (p->lpid > 0)
1281 				n = scnprintf(p->buf, sizeof(p->buf),
1282 					      "\nNested LPID %d: ", p->lpid);
1283 			n += scnprintf(p->buf + n, sizeof(p->buf) - n,
1284 				      "pgdir: %lx\n", (unsigned long)pgt);
1285 			p->hdr = 1;
1286 			goto copy;
1287 		}
1288 
1289 		pgdp = pgt + pgd_index(gpa);
1290 		pgd = READ_ONCE(*pgdp);
1291 		if (!(pgd_val(pgd) & _PAGE_PRESENT)) {
1292 			gpa = (gpa & PGDIR_MASK) + PGDIR_SIZE;
1293 			continue;
1294 		}
1295 
1296 		pudp = pud_offset(&pgd, gpa);
1297 		pud = READ_ONCE(*pudp);
1298 		if (!(pud_val(pud) & _PAGE_PRESENT)) {
1299 			gpa = (gpa & PUD_MASK) + PUD_SIZE;
1300 			continue;
1301 		}
1302 		if (pud_val(pud) & _PAGE_PTE) {
1303 			pte = pud_val(pud);
1304 			shift = PUD_SHIFT;
1305 			goto leaf;
1306 		}
1307 
1308 		pmdp = pmd_offset(&pud, gpa);
1309 		pmd = READ_ONCE(*pmdp);
1310 		if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
1311 			gpa = (gpa & PMD_MASK) + PMD_SIZE;
1312 			continue;
1313 		}
1314 		if (pmd_val(pmd) & _PAGE_PTE) {
1315 			pte = pmd_val(pmd);
1316 			shift = PMD_SHIFT;
1317 			goto leaf;
1318 		}
1319 
1320 		ptep = pte_offset_kernel(&pmd, gpa);
1321 		pte = pte_val(READ_ONCE(*ptep));
1322 		if (!(pte & _PAGE_PRESENT)) {
1323 			gpa += PAGE_SIZE;
1324 			continue;
1325 		}
1326 		shift = PAGE_SHIFT;
1327 	leaf:
1328 		n = scnprintf(p->buf, sizeof(p->buf),
1329 			      " %lx: %lx %d\n", gpa, pte, shift);
1330 		gpa += 1ul << shift;
1331 	copy:
1332 		p->chars_left = n;
1333 		if (n > len)
1334 			n = len;
1335 		r = copy_to_user(buf, p->buf, n);
1336 		n -= r;
1337 		p->chars_left -= n;
1338 		p->buf_index = n;
1339 		buf += n;
1340 		len -= n;
1341 		ret += n;
1342 		if (r) {
1343 			if (!ret)
1344 				ret = -EFAULT;
1345 			break;
1346 		}
1347 	}
1348 	p->gpa = gpa;
1349 	if (nested)
1350 		kvmhv_put_nested(nested);
1351 
1352  out:
1353 	mutex_unlock(&p->mutex);
1354 	return ret;
1355 }
1356 
1357 static ssize_t debugfs_radix_write(struct file *file, const char __user *buf,
1358 			   size_t len, loff_t *ppos)
1359 {
1360 	return -EACCES;
1361 }
1362 
1363 static const struct file_operations debugfs_radix_fops = {
1364 	.owner	 = THIS_MODULE,
1365 	.open	 = debugfs_radix_open,
1366 	.release = debugfs_radix_release,
1367 	.read	 = debugfs_radix_read,
1368 	.write	 = debugfs_radix_write,
1369 	.llseek	 = generic_file_llseek,
1370 };
1371 
1372 void kvmhv_radix_debugfs_init(struct kvm *kvm)
1373 {
1374 	kvm->arch.radix_dentry = debugfs_create_file("radix", 0400,
1375 						     kvm->arch.debugfs_dir, kvm,
1376 						     &debugfs_radix_fops);
1377 }
1378 
1379 int kvmppc_radix_init(void)
1380 {
1381 	unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;
1382 
1383 	kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
1384 	if (!kvm_pte_cache)
1385 		return -ENOMEM;
1386 
1387 	size = sizeof(void *) << RADIX_PMD_INDEX_SIZE;
1388 
1389 	kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor);
1390 	if (!kvm_pmd_cache) {
1391 		kmem_cache_destroy(kvm_pte_cache);
1392 		return -ENOMEM;
1393 	}
1394 
1395 	return 0;
1396 }
1397 
1398 void kvmppc_radix_exit(void)
1399 {
1400 	kmem_cache_destroy(kvm_pte_cache);
1401 	kmem_cache_destroy(kvm_pmd_cache);
1402 }
1403