/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

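/*
 * WARN_ON_ONCE() takes a trap on powerpc, which is only handled safely in
 * virtual mode; much of this file runs in real mode, so an open-coded
 * once-only warning built from pr_err() and dump_stack() is used instead.
 */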
#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)      ({                      \
        static bool __section(.data.unlikely) __warned;         \
        int __ret_warn_once = !!(condition);                    \
                                                                \
        if (unlikely(__ret_warn_once && !__warned)) {           \
                __warned = true;                                \
                pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",      \
                                __stringify(condition),         \
                                __func__, __LINE__);            \
                dump_stack();                                   \
        }                                                       \
        unlikely(__ret_warn_once);                              \
})

#else

#define WARN_ON_ONCE_RM(condition) ({                           \
        int __ret_warn_on = !!(condition);                      \
        unlikely(__ret_warn_on);                                \
})

#endif

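/*
 * Number of TCEs in one backing page, e.g. 512 with a 4K PAGE_SIZE
 * (4096 / sizeof(u64)) or 8192 with 64K pages; note that 512 is also the
 * H_PUT_TCE_INDIRECT list limit checked below.
 */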
#define TCES_PER_PAGE   (PAGE_SIZE / sizeof(u64))

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 * mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
                unsigned long liobn)
{
        struct kvmppc_spapr_tce_table *stt;

        list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
                if (stt->liobn == liobn)
                        return stt;

        return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

/*
 * Validates a TCE by checking its flags and page mask.
 * As the host kernel does not access those addresses (it only puts them
 * into the table and user space is supposed to process them), we can skip
 * checking other things (such as whether the TCE is a guest RAM address or
 * whether the page was actually allocated).
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
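/*
 * A TCE is the guest physical address of the DMA page combined with the
 * TCE_PCI_READ/TCE_PCI_WRITE permission bits in the low bits, e.g.
 * (gpa & PAGE_MASK) | TCE_PCI_READ | TCE_PCI_WRITE for a bidirectional
 * mapping; iommu_tce_direction() recovers the DMA direction from those
 * two bits.
 */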
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
        unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
        enum dma_data_direction dir = iommu_tce_direction(tce);

        /* Allow userspace to poison TCE table */
        if (dir == DMA_NONE)
                return H_SUCCESS;

        if (iommu_tce_check_gpa(stt->page_shift, gpa))
                return H_PARAMETER;

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_validate);

/* Note on the use of page_address() in real mode,
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))); this is a purely
 * arithmetic operation and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently, but
 * either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL would have to be
 * enabled for that.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only, and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
        return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail, so kvmppc_tce_validate must be called before it.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
                unsigned long idx, unsigned long tce)
{
        struct page *page;
        u64 *tbl;

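        /*
         * stt->offset is the index of the first entry of this table in
         * IOBA terms; after subtracting it, idx indexes the backing
         * pages, each of which holds TCES_PER_PAGE entries.
         */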
        idx -= stt->offset;
        page = stt->pages[idx / TCES_PER_PAGE];
        tbl = kvmppc_page_address(page);

        tbl[idx % TCES_PER_PAGE] = tce;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);

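/*
 * Translates a guest physical address to a host userspace address via the
 * memslots, preserving the in-page offset and ignoring the TCE permission
 * bits. Optionally returns a pointer to the rmap entry for the page so a
 * real-mode caller can lock it (HV only).
 */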
long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
                unsigned long *ua, unsigned long **prmap)
{
        unsigned long gfn = gpa >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot;

        memslot = search_memslots(kvm_memslots(kvm), gfn);
        if (!memslot)
                return -EINVAL;

        *ua = __gfn_to_hva_memslot(memslot, gfn) |
                (gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        if (prmap)
                *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
#endif

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
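/*
 * Resets an IOMMU table entry to the unmapped state (DMA_NONE) via a
 * real-mode exchange; the previous HPA/direction are deliberately ignored
 * as this is only used to clean up after a failed update.
 */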
static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
        unsigned long hpa = 0;
        enum dma_data_direction dir = DMA_NONE;

        iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
}

static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        const unsigned long pgsize = 1ULL << tbl->it_page_shift;
        unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        pua = (void *) vmalloc_to_phys(pua);
        if (WARN_ON_ONCE_RM(!pua))
                return H_HARDWARE;

        mem = mm_iommu_lookup_rm(kvm->mm, *pua, pgsize);
        if (!mem)
                return H_TOO_HARD;

        mm_iommu_mapped_dec(mem);

        *pua = 0;

        return H_SUCCESS;
}

static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        enum dma_data_direction dir = DMA_NONE;
        unsigned long hpa = 0;
        long ret;

        if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
                /*
                 * real mode xchg can fail if struct page crosses
                 * a page boundary
                 */
                return H_TOO_HARD;

        if (dir == DMA_NONE)
                return H_SUCCESS;

        ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret)
                iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);

        return ret;
}

static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        long ret;
        unsigned long hpa = 0;
        unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
        struct mm_iommu_table_group_mem_t *mem;

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
        if (!mem)
                return H_TOO_HARD;

        if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
                        &hpa)))
                return H_TOO_HARD;

        pua = (void *) vmalloc_to_phys(pua);
        if (WARN_ON_ONCE_RM(!pua))
                return H_HARDWARE;

        if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
                return H_TOO_HARD;

        ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
        if (ret) {
                mm_iommu_mapped_dec(mem);
                /*
                 * real mode xchg can fail if struct page crosses
                 * a page boundary
                 */
                return H_TOO_HARD;
        }

        if (dir != DMA_NONE)
                kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

        *pua = ua;

        return 0;
}

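/*
 * Real-mode handler for the H_PUT_TCE hcall: liobn selects the TCE table,
 * ioba is the offset into it on the I/O bus, and tce is the new entry.
 * H_TOO_HARD makes the caller retry the hcall in virtual mode when the
 * request cannot be completed safely here.
 */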
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry, ua = 0;
        enum dma_data_direction dir;

        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*          liobn, ioba, tce); */

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        ret = kvmppc_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                return ret;

        dir = iommu_tce_direction(tce);
        if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
                        tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
                return H_PARAMETER;

        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                if (dir == DMA_NONE)
                        ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
                                        stit->tbl, entry);
                else
                        ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
                                        stit->tbl, entry, ua, dir);

                if (ret == H_SUCCESS)
                        continue;

                if (ret == H_TOO_HARD)
                        return ret;

                WARN_ON_ONCE_RM(1);
                kvmppc_rm_clear_tce(stit->tbl, entry);
        }

        kvmppc_tce_put(stt, entry, tce);

        return H_SUCCESS;
}

static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
                unsigned long ua, unsigned long *phpa)
{
        pte_t *ptep, pte;
        unsigned shift = 0;

        /*
         * Called in real mode with MSR_EE = 0. We are safe here.
         * It is ok to do the lookup with arch.pgdir here, because
         * we are doing this on secondary cpus and current task there
         * is not the hypervisor. Also this is safe against THP in the
         * host, because an IPI to the primary thread will wait for the
         * secondary to exit, which again allows the page table walk
         * below to finish.
         */
        ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
        if (!ptep || !pte_present(*ptep))
                return -ENXIO;
        pte = *ptep;

        if (!shift)
                shift = PAGE_SHIFT;

        /* Avoid handling anything potentially complicated in realmode */
        if (shift > PAGE_SHIFT)
                return -EAGAIN;

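        /*
         * Likewise punt to virtual mode if the page has not been
         * referenced yet: updating the accessed bit is more than we
         * want to do in real mode.
         */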
        if (!pte_young(pte))
                return -EAGAIN;

        *phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
                        (ua & ~PAGE_MASK);

        return 0;
}

long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS;
        unsigned long tces, entry, ua = 0;
        unsigned long *rmap = NULL;
        bool prereg = false;
        struct kvmppc_spapr_tce_iommu_table *stit;

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * The spec says that the maximum size of the list is 512 TCEs,
         * so the whole table fits in a single 4K page.
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        if (mm_iommu_preregistered(vcpu->kvm->mm)) {
                /*
                 * We get here if guest memory was pre-registered, which
                 * is normally the VFIO case, and gpa->hpa translation
                 * does not depend on hpt.
                 */
                struct mm_iommu_table_group_mem_t *mem;

                if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
                        return H_TOO_HARD;

                mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
                if (mem)
                        prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
                                        IOMMU_PAGE_SHIFT_4K, &tces) == 0;
        }

        if (!prereg) {
                /*
                 * This is usually the case of a guest with emulated devices
                 * only, when the TCE list is not in preregistered memory.
                 * We do not require memory to be preregistered in this case,
                 * so lock the rmap and do __find_linux_pte().
                 */
                if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
                        return H_TOO_HARD;

                rmap = (void *) vmalloc_to_phys(rmap);
                if (WARN_ON_ONCE_RM(!rmap))
                        return H_TOO_HARD;

                /*
                 * Synchronize with the MMU notifier callbacks in
                 * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
                 * While we have the rmap lock, code running on other CPUs
                 * cannot finish unmapping the host real page that backs
                 * this guest real page, so we are OK to access the host
                 * real page.
                 */
                lock_rmap(rmap);
                if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
        }

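        /*
         * Either path above leaves tces holding the host physical address
         * of the guest's TCE list page, which real mode can dereference
         * directly.
         */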
        for (i = 0; i < npages; ++i) {
                unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

                ret = kvmppc_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;

                ua = 0;
                if (kvmppc_gpa_to_ua(vcpu->kvm,
                                tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
                                &ua, NULL)) {
                        ret = H_PARAMETER;
                        goto unlock_exit;
                }

                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
                                        stit->tbl, entry + i, ua,
                                        iommu_tce_direction(tce));

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                goto unlock_exit;

                        WARN_ON_ONCE_RM(1);
                        kvmppc_rm_clear_tce(stit->tbl, entry + i);
                }

                kvmppc_tce_put(stt, entry + i, tce);
        }


unlock_exit:
        if (rmap)
                unlock_rmap(rmap);

        return ret;
}

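/*
 * Real-mode handler for the H_STUFF_TCE hcall: writes the same tce_value
 * into npages consecutive entries starting at ioba.
 */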
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;
        struct kvmppc_spapr_tce_iommu_table *stit;

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        /* Check permission bits only to allow userspace to poison TCEs for debug */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long entry = ioba >> stit->tbl->it_page_shift;

                for (i = 0; i < npages; ++i) {
                        ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
                                        stit->tbl, entry + i);

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                return ret;

                        WARN_ON_ONCE_RM(1);
                        kvmppc_rm_clear_tce(stit->tbl, entry + i);
                }
        }


        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

        return H_SUCCESS;
}

/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                unsigned long ioba)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret;
        unsigned long idx;
        struct page *page;
        u64 *tbl;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        idx = (ioba >> stt->page_shift) - stt->offset;
        page = stt->pages[idx / TCES_PER_PAGE];
        tbl = (u64 *)page_address(page);

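        /* hcall output values are passed back to the guest in r4 and up */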
        vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
