// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/stringify.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_BUG

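/*
 * Real-mode-safe variant of WARN_ON_ONCE(): reports the condition once via
 * pr_err() and dump_stack() and evaluates to the (unlikely) condition itself,
 * rather than relying on the trap-based WARN machinery, which is presumably
 * not safe to use with the MMU off.
 */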
#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(".data.unlikely") __warned;	\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 *          mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

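	/*
	 * Lockless walk; the list is expected to be modified only with
	 * kvm->lock held and entries freed via RCU, which is what makes
	 * this usable from real mode as well.
	 */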
	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
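/*
 * Converts the guest physical address carried in a TCE into a host userspace
 * address: looks up the (real-mode safe) memslot for the GFN, keeps the
 * offset within the page and strips the TCE_PCI_READ/WRITE permission bits.
 */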
static long kvmppc_rm_tce_to_ua(struct kvm *kvm,
				unsigned long tce, unsigned long *ua)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	return 0;
}

/*
 * Validates a TCE address.
 * At the moment only the flags and the page mask are validated.
 * As the host kernel does not access those addresses (it just puts them
 * into the table and user space is supposed to process them), we can skip
 * checking other things (such as whether the TCE points to guest RAM or
 * whether the page was actually allocated).
 */
static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;

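	/*
	 * For tables attached to a hardware IOMMU group, also check that the
	 * translated address is backed by preregistered memory; otherwise
	 * punt to virtual mode with H_TOO_HARD.
	 */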
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem)
			return H_TOO_HARD;

		if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
			return H_TOO_HARD;
	}

	return H_SUCCESS;
}

/* Note on the use of page_address() in real mode,
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))); this is a purely
 * arithmetic operation and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently,
 * but then either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only, and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail, so kvmppc_rm_tce_validate must be called before it.
 */
static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	/*
	 * kvmppc_rm_ioba_validate() allows pages not to be allocated if the
	 * TCE is being cleared, otherwise it returns H_TOO_HARD and we skip
	 * this.
	 */
	if (!page) {
		WARN_ON_ONCE_RM(tce != 0);
		return;
	}
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}

/*
 * TCE pages are allocated on demand by the virtual-mode handler
 * (kvmppc_tce_put()); kvmppc_rm_tce_put() cannot allocate them in real mode.
 * Check whether kvmppc_rm_tce_put() can succeed in real mode, i.e. whether a
 * TCE page is already allocated or not required (when clearing a TCE entry).
 */
static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long ioba, unsigned long npages, bool clearing)
{
	unsigned long i, idx, sttpage, sttpages;
	unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages);

	if (ret)
		return ret;
	/*
	 * clearing==true says kvmppc_rm_tce_put won't be allocating pages
	 * for empty tces.
	 */
	if (clearing)
		return H_SUCCESS;

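	/*
	 * Work out which backing pages of the guest-visible TCE table the
	 * request touches and make sure every one of them is already
	 * allocated.
	 */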
	idx = (ioba >> stt->page_shift) - stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	sttpages = ALIGN(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
			TCES_PER_PAGE;
	for (i = sttpage; i < sttpage + sttpages; ++i)
		if (!stt->pages[i])
			return H_TOO_HARD;

	return H_SUCCESS;
}

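/*
 * Real-mode wrapper around the platform's xchg_no_kill() callback: swaps a
 * TCE in the hardware table and, if the previous entry allowed device writes,
 * marks the cached userspace address dirty.
 */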
static long iommu_tce_xchg_no_kill_rm(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
				(*direction == DMA_BIDIRECTIONAL))) {
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
		/*
		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
		 * calling this, so we still get a valid UA here.
		 */
		if (pua && *pua)
			mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
	}

	return ret;
}

static void iommu_tce_kill_rm(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	if (tbl->it_ops->tce_kill)
		tbl->it_ops->tce_kill(tbl, entry, pages, true);
}

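/*
 * Resets a hardware TCE to an empty (DMA_NONE) entry; this is a best-effort
 * cleanup path, so the return value is deliberately ignored.
 */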
static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
}

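/*
 * Drops the "mapped" reference on the preregistered memory region backing
 * @entry and clears the cached userspace address.
 */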
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

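/*
 * Clears one hardware TCE and, if it was a valid mapping, releases the
 * reference taken on the preregistered memory when it was mapped.
 */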
static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

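/*
 * Unmaps one guest-view TCE, which may cover several hardware entries when
 * the guest IOMMU page size is larger than the hardware IOMMU page size.
 */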
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

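/*
 * Maps one hardware TCE: translates @ua via the preregistered memory list,
 * takes a "mapped" reference, installs the host physical address and caches
 * @ua for later unmapping and dirty tracking. Anything that cannot be
 * handled here is reported as H_TOO_HARD so the hypercall is retried in
 * virtual mode.
 */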
static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
			&hpa)))
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

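/*
 * Maps one guest-view TCE, splitting it into as many hardware entries as
 * needed when the guest IOMMU page size exceeds the hardware IOMMU page size.
 */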
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

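/*
 * Real-mode handler for the H_PUT_TCE hypercall: validates the request,
 * updates the hardware IOMMU tables attached to this LIOBN and finally the
 * guest-visible copy of the table.
 */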
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_rm_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry, ua, dir);

		iommu_tce_kill_rm(stit->tbl, entry, 1);

		if (ret != H_SUCCESS) {
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
			return ret;
		}
	}

	kvmppc_rm_tce_put(stt, entry, tce);

	return H_SUCCESS;
}

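/*
 * Translates a userspace address into a host physical address by walking the
 * host page table in real mode; anything awkward to handle here (huge pages,
 * non-present or not-young PTEs) makes it bail out with an error.
 */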
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
				unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	/*
	 * Called in real mode with MSR_EE = 0. We are safe here.
	 * It is ok to do the lookup with arch.pgdir here, because
	 * we are doing this on secondary cpus and the current task there
	 * is not the hypervisor. Also this is safe against THP in the
	 * host, because an IPI to the primary thread will wait for the
	 * secondary to exit, which in turn lets the page table walk below
	 * finish.
	 */
	/*
	 * An rmap lock won't make it safe, because that only ensures hash
	 * page table entries are removed with the rmap lock held. After that
	 * the mmu notifier returns and we go ahead and remove PTEs from the
	 * QEMU page table.
	 */
	ptep = find_kvm_host_pte(vcpu->kvm, mmu_seq, ua, &shift);
	if (!ptep)
		return -ENXIO;

	pte = READ_ONCE(*ptep);
	if (!pte_present(pte))
		return -ENXIO;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in real mode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}

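/*
 * Real-mode handler for H_PUT_TCE_INDIRECT: the TCE list itself lives in
 * guest memory, so it is reached either through preregistered memory (the
 * VFIO case) or by a direct host page table walk under the MMU lock.
 */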
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long mmu_seq;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	/*
	 * Used to check for invalidations in progress.
	 */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole table addressed resides in a 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which is
		 * normally the VFIO case, and the gpa->hpa translation does
		 * not depend on the HPT.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
					IOMMU_PAGE_SHIFT_4K, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually the case of a guest with emulated devices
		 * only, when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case,
		 * so take the MMU lock and walk the host page table directly.
		 */
		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
			return H_TOO_HARD;

		arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
		if (kvmppc_rm_ua_to_hpa(vcpu, mmu_seq, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_rm_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ua = 0;
		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto invalidate_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
						entry + i);
				goto invalidate_exit;
			}
		}

		kvmppc_rm_tce_put(stt, entry + i, tce);
	}

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill_rm(stit->tbl, entry, npages);

unlock_exit:
	if (!prereg)
		arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
	return ret;
}

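/*
 * Real-mode handler for H_STUFF_TCE: fills @npages consecutive entries with
 * the same value, which must not have the read/write permission bits set.
 */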
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only to allow userspace to poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto invalidate_exit;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);

	return ret;
}

/* This can be called in either virtual mode or real mode */
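/*
 * H_GET_TCE handler: returns the guest-view TCE at @ioba in GPR4, or 0 if
 * the backing page has not been allocated yet.
 */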
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	if (!page) {
		vcpu->arch.regs.gpr[4] = 0;
		return H_SUCCESS;
	}
	tbl = (u64 *)page_address(page);

	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */