/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

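/*
 * These handlers run in hypervisor real mode, with the MMU off, so
 * they can only safely access memory through the kernel's linear
 * mapping.  Structures that live in vmalloc space (such as the revmap
 * array) must first have their addresses translated with
 * real_vmalloc_addr() below.
 */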
/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

	p = find_linux_pte(swapper_pg_dir, addr);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

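/*
 * Reverse-map bookkeeping: the rmap word for a guest page holds a lock
 * bit, a present bit, referenced/changed bits, and the HPT index of
 * one HPTE mapping the page (the KVMPPC_RMAP_* fields).  All HPTEs for
 * the same page are kept on a circular doubly-linked list through the
 * forw/back fields of their revmap entries, which store HPT indexes
 * rather than pointers so that the list can be walked in real mode.
 */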
/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.revmap[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		i = pte_index;
	}
	smp_wmb();
	*rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT; /* unlock */
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long gfn, ptel, head;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long rcbits;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return;

	rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	unlock_rmap(rmap);
}

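/*
 * Look up the Linux PTE for a host virtual address.  On entry
 * *pte_sizep holds the minimum page size the caller can use; on
 * success it is updated with the actual size of the backing page.
 * kvmppc_read_update_linux_pte() also sets the accessed (and, for a
 * write access, dirty) bits as a side effect.
 */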
static pte_t lookup_linux_pte(struct kvm_vcpu *vcpu, unsigned long hva,
			      int writing, unsigned long *pte_sizep)
{
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
	unsigned int shift;

	ptep = find_linux_pte_or_hugepte(vcpu->arch.pgdir, hva, &shift);
	if (!ptep)
		return __pte(0);
	if (shift)
		*pte_sizep = 1ul << shift;
	else
		*pte_sizep = PAGE_SIZE;
	if (ps > *pte_sizep)
		return __pte(0);
	if (!pte_present(*ptep))
		return __pte(0);
	return kvmppc_read_update_linux_pte(ptep, writing);
}

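/*
 * HPTE_V_HVLOCK is a software lock bit in the first doubleword of an
 * HPTE.  try_lock_hpte() (defined elsewhere) makes a single
 * ldarx/stdcx. attempt to set it; unlock_hpte() stores a new first
 * doubleword with release ordering, which also clears the lock bit.
 */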
static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
{
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = hpte_v;
}

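/*
 * Real-mode handler for the H_ENTER hypercall: insert an HPTE into
 * the guest's hashed page table.  pteh/ptel are the two doublewords
 * of the HPTE as the guest wants them; we validate them, fill in the
 * real page address, and return the index of the slot actually used
 * in GPR4.  For illustration only (names as in the pSeries hcall
 * API), a guest would reach this roughly as:
 *
 *	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 *	plpar_hcall(H_ENTER, retbuf, flags, pte_index, pteh, ptel);
 *	slot = retbuf[0];
 */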
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel = ptel;
	struct kvm_memory_slot *memslot;
	unsigned long *physp, pte_size;
	unsigned long is_io;
	unsigned long *rmap;
	pte_t pte;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits;
	bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
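	/*
	 * The smp_rmb() above orders the read of mmu_notifier_seq before
	 * the page table reads below; paired with the mmu_notifier_retry()
	 * check made under the rmap lock, it lets us detect an
	 * invalidation that races with this H_ENTER.
	 */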

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
	pa = 0;
	is_io = ~0ul;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* PPC970 can't do emulated MMIO */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			return H_PARAMETER;
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->rmap[slot_fn];

	if (!kvm->arch.using_mmu_notifiers) {
		physp = kvm->arch.slot_phys[memslot->id];
		if (!physp)
			return H_PARAMETER;
		physp += slot_fn;
		if (realmode)
			physp = real_vmalloc_addr(physp);
		pa = *physp;
		if (!pa)
			return H_TOO_HARD;
		is_io = pa & (HPTE_R_I | HPTE_R_W);
		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
		pa &= PAGE_MASK;
	} else {
		/* Translate to host virtual address */
		hva = gfn_to_hva_memslot(memslot, gfn);

		/* Look up the Linux PTE for the backing page */
		pte_size = psize;
		pte = lookup_linux_pte(vcpu, hva, writing, &pte_size);
		if (pte_present(pte)) {
			if (writing && !pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_io = hpte_cache_bits(pte_val(pte));
			pa = pte_pfn(pte) << PAGE_SHIFT;
		}
	}
	if (pte_size < psize)
		return H_PARAMETER;
	if (pa && pte_size > psize)
		pa |= gpa & (pte_size - 1);

	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else
		pteh |= HPTE_V_ABSENT;

	/* Check WIMG */
	if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
		if (is_io)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}

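	/*
	 * The HPT is organized as groups (HPTEGs) of 8 HPTEs of 16 bytes
	 * each, hence the (pte_index << 4) addressing below; clearing the
	 * low three bits of pte_index selects the start of a group.
	 */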
	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it.  Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				*hpte &= ~HPTE_V_HVLOCK;
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				*hpte &= ~HPTE_V_HVLOCK;
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.revmap[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev)
		rev->guest_rpte = g_ptel;

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (kvm->arch.using_mmu_notifiers &&
		    mmu_notifier_retry(vcpu, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	hpte[1] = ptel;

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	hpte[0] = pteh;
	asm volatile("ptesync" : : : "memory");

	vcpu->arch.gpr[4] = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_enter);

#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))

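/*
 * Take kvm->arch.tlbie_lock with a single lwarx/stwcx. attempt (no
 * retry on reservation loss; callers spin on failure).  The value
 * stored is the nonzero lock token from the PACA.
 */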
static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "  stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "  isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn,
		     unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	v = hpte[0] & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		hpte[0] &= ~HPTE_V_VALID;
		rb = compute_tlbie_rb(v, hpte[1], pte_index);
		if (!(flags & H_LOCAL) && atomic_read(&kvm->online_vcpus) > 1) {
			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
				cpu_relax();
			asm volatile("ptesync" : : : "memory");
			asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
				     : : "r" (rb), "r" (kvm->arch.lpid));
			asm volatile("ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;
		} else {
			asm volatile("ptesync" : : : "memory");
			asm volatile("tlbiel %0" : : "r" (rb));
			asm volatile("ptesync" : : : "memory");
		}
		/* Read PTE low word after tlbie to get final R/C values */
		remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
	}
	r = rev->guest_rpte;
	unlock_hpte(hpte, 0);

	vcpu->arch.gpr[4] = v;
	vcpu->arch.gpr[5] = r;
	return H_SUCCESS;
}

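/*
 * Real-mode handler for H_BULK_REMOVE.  The parameter block lives in
 * GPR4-GPR11 as up to four (control, avpn) doubleword pairs.  The top
 * byte of each control word carries the request type in its top two
 * bits (3 = end of list; only "remove" requests are accepted here)
 * and the match flags in its low two bits.  On return the same byte
 * holds the status (0x80 success, 0x90 not found, 0xa0 parameter
 * error), with the final R and C bits folded in for removed entries.
 */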
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, *hptes[4], tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	long int local = 0;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];

	if (atomic_read(&kvm->online_vcpus) == 1)
		local = 1;
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {		/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 || pte_index >= HPT_NPTE) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (unsigned long *)
				(kvm->arch.hpt_virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:		/* absolute */
					found = 1;
					break;
				case 1:		/* andcond */
					if (!(hp[0] & args[j + 1]))
						found = 1;
					break;
				case 2:		/* AVPN */
					if ((hp[0] & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~HPTE_V_HVLOCK;
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);

			if (!(hp[0] & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				hp[0] = 0;
				continue;
			}

			hp[0] &= ~HPTE_V_VALID;		/* leave it locked */
			tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		if (!local) {
			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
				cpu_relax();
			asm volatile("ptesync" : : : "memory");
			for (k = 0; k < n; ++k)
				asm volatile(PPC_TLBIE(%1,%0) : :
					     "r" (tlbrb[k]),
					     "r" (kvm->arch.lpid));
			asm volatile("eieio; tlbsync; ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;
		} else {
			asm volatile("ptesync" : : : "memory");
			for (k = 0; k < n; ++k)
				asm volatile("tlbiel %0" : : "r" (tlbrb[k]));
			asm volatile("ptesync" : : : "memory");
		}

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]);
			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			hp[0] = 0;
		}
	}

	return ret;
}

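/*
 * Real-mode handler for H_PROTECT: change the protection (pp), key
 * and no-execute bits of an existing HPTE.  The guest's view in the
 * revmap entry is updated to match, and a valid translation must be
 * invalidated and flushed before the new second doubleword is written.
 */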
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;

	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}

	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	v = hpte[0];
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
	}
	r = (hpte[1] & ~mask) | bits;

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		rb = compute_tlbie_rb(v, r, pte_index);
		hpte[0] = v & ~HPTE_V_VALID;
		if (!(flags & H_LOCAL)) {
			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
				cpu_relax();
			asm volatile("ptesync" : : : "memory");
			asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
				     : : "r" (rb), "r" (kvm->arch.lpid));
			asm volatile("ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;
		} else {
			asm volatile("ptesync" : : : "memory");
			asm volatile("tlbiel %0" : : "r" (rb));
			asm volatile("ptesync" : : : "memory");
		}
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}

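/*
 * Real-mode handler for H_READ: return one HPTE (or a group of four,
 * if H_READ_4 is set) in GPR4 onwards, giving the guest's view:
 * software-only ABSENT entries are reported as valid, and the guest's
 * version of the second doubleword is returned with the current R and
 * C bits merged in.
 */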
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		v = hpte[0] & ~HPTE_V_HVLOCK;
		r = hpte[1];
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID)
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}

void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
			unsigned long pte_index)
{
	unsigned long rb;

	hptep[0] &= ~HPTE_V_VALID;
	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
		cpu_relax();
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
		     : : "r" (rb), "r" (kvm->arch.lpid));
	asm volatile("ptesync" : : : "memory");
	kvm->arch.tlbie_lock = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
			   unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;

	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
		cpu_relax();
	asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
		     : : "r" (rb), "r" (kvm->arch.lpid));
	asm volatile("ptesync" : : : "memory");
	kvm->arch.tlbie_lock = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};

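/*
 * Search the HPT for an HPTE matching the given effective address and
 * SLB entry value.  The hash and AVPN are computed the same way the
 * hardware would; the primary hash group is scanned first, then the
 * secondary.  On a match the HPTE index is returned with the HPTE
 * still locked, so the caller can examine it atomically.
 */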
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	unsigned long *hpte;
	unsigned long mask, val;
	unsigned long v, r;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & HPT_HASH_MASK;
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = hpte[i] & ~HPTE_V_HVLOCK;

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = hpte[i] & ~HPTE_V_HVLOCK;
			r = hpte[i+1];

			/*
			 * Check the HPTE again, including large page size
			 * Since we don't currently allow any MPSS (mixed
			 * page-size segment) page sizes, it is sufficient
			 * to check against the actual page size.
			 */
			if ((v & valid) && (v & mask) == val &&
			    hpte_page_size(v, r) == (1ul << pshift))
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			/* Unlock and move on */
			hpte[i] = v;
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ HPT_HASH_MASK;
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if not
 * (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr;
	unsigned long *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE)
		valid |= HPTE_V_ABSENT;

	index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
	if (index < 0) {
		if (status & DSISR_NOHPTE)
			return status;	/* there really was no HPTE */
		return 0;		/* for prot fault, HPTE disappeared */
	}
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hpte[0] & ~HPTE_V_HVLOCK;
	r = hpte[1];
	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
	gr = rev->guest_rpte;

	unlock_hpte(hpte, v);

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
	    (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
		return -2;	/* MMIO emulation - load instr word */

	return -1;		/* send fault up to host kernel mode */
}