/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>

#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))

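/*
 * Worked example (the value depends on kernel config): with 64K kernel
 * pages, TCES_PER_PAGE = 65536 / 8 = 8192; with 4K kernel pages it is 512.
 */
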
/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 *          mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
		unsigned long liobn)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

/*
 * Validates an IO address: checks page alignment and that the whole
 * range of npages pages starting at ioba falls within the table's window.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long ioba, unsigned long npages)
{
	unsigned long mask = (1ULL << stt->page_shift) - 1;
	unsigned long idx = ioba >> stt->page_shift;

	if ((ioba & mask) || (idx < stt->offset) ||
			(idx - stt->offset + npages > stt->size) ||
			(idx + npages < idx))
		return H_PARAMETER;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
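
/*
 * Worked example of the checks above, for a hypothetical table with
 * page_shift = 12 (4K IOMMU pages), offset = 0 and size = 512:
 * ioba = 0x3000, npages = 1 gives idx = 3 and passes; ioba = 0x3080
 * fails the alignment check; ioba = 0x1ff000, npages = 2 fails since
 * 511 + 2 > 512.
 */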

/*
 * Validates a TCE address.
 * At the moment only the flags and the page mask are validated.
 * As the host kernel does not access those addresses (it just puts them
 * into the table and user space is supposed to process them), we can skip
 * checking other things (such as whether the TCE points to guest RAM, or
 * whether the page was actually allocated).
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
	unsigned long page_mask = ~((1ULL << stt->page_shift) - 1);
	unsigned long mask = ~(page_mask | TCE_PCI_WRITE | TCE_PCI_READ);

	if (tce & mask)
		return H_PARAMETER;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_validate);
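
/*
 * Worked example for the mask above, assuming the usual asm/tce.h
 * encoding (TCE_PCI_READ = 1, TCE_PCI_WRITE = 2) and page_shift = 12:
 * mask = 0xffc, so tce = 0x12345003 (aligned address plus read+write
 * bits) passes, while tce = 0x12345803 (a stray bit between the
 * permission bits and the page mask) fails with H_PARAMETER.
 */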

/* Note on the use of page_address() in real mode,
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))): a purely arithmetic
 * operation that does not access the page struct.
 *
 * Theoretically page_address() could be defined differently, but
 * either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only, and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Called in both real and virtual modes.
 * Cannot fail, so kvmppc_tce_validate must be called before it.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);
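
/*
 * Indexing example with hypothetical values: with 4K kernel pages
 * (TCES_PER_PAGE = 512) and idx = 1000 after the offset adjustment,
 * the TCE is stored in stt->pages[1] at slot 488
 * (1000 / 512 = 1, 1000 % 512 = 488).
 */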

long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
		unsigned long *ua, unsigned long **prmap)
{
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (prmap)
		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
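
/*
 * Composition example with hypothetical values and 4K pages: for
 * gpa = 0x40001083, gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE)
 * keeps the in-page offset with the two TCE permission bits stripped
 * (0x080 here), which is OR'ed into the host virtual address of the
 * gfn within the memslot.
 */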

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
	long ret;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);

	return H_SUCCESS;
}

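/*
 * End-to-end example with hypothetical values: a guest maps one IOMMU
 * page by calling H_PUT_TCE with the liobn of its DMA window,
 * ioba = 0x2000 and tce = guest_page_addr | TCE_PCI_READ | TCE_PCI_WRITE;
 * after both validations the value is stored at table index
 * ioba >> page_shift, i.e. index 2 for 4K IOMMU pages.
 */
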
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	ptep = __find_linux_pte_or_hugepte(vcpu->arch.pgdir, ua, NULL, &shift);
	if (!ptep || !pte_present(*ptep))
		return -ENXIO;
	pte = *ptep;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}

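/*
 * Note on the composition in kvmppc_rm_ua_to_hpa(): by that point
 * shift == PAGE_SHIFT (larger shifts bail out with -EAGAIN), so the
 * two OR'ed offset terms are identical and one of them is redundant.
 * Example with hypothetical values: pfn = 0x12345 and an in-page
 * offset of 0x7b4 give *phpa = 0x12345000 | 0x7b4 = 0x123457b4.
 */
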
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;

	stt = kvmppc_find_table(vcpu, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole list fits within a single 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
		return H_TOO_HARD;

	rmap = (void *) vmalloc_to_phys(rmap);

	/*
	 * Synchronize with the MMU notifier callbacks in
	 * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
	 * While we have the rmap lock, code running on other CPUs
	 * cannot finish unmapping the host real page that backs
	 * this guest real page, so we are OK to access the host
	 * real page.
	 */
	lock_rmap(rmap);
	if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	unlock_rmap(rmap);

	return ret;
}

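/*
 * Shape of the H_PUT_TCE_INDIRECT request above, for reference:
 * tce_list is the guest physical address of a 4K-aligned page holding
 * up to 512 big-endian u64 TCE values, one per IOMMU page, which are
 * applied starting at table index entry = ioba >> page_shift.
 */
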
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;

	stt = kvmppc_find_table(vcpu, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to allow userspace to poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}

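/*
 * Example with hypothetical values: H_STUFF_TCE with ioba = 0,
 * npages = 3 and tce_value = 0 on a 4K-IOMMU-page table clears table
 * indexes 0, 1 and 2; ioba advances by 1 << page_shift = 0x1000 per
 * iteration.
 */
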
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = (u64 *)page_address(page);

	vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);
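
/*
 * Per the PAPR hcall convention, the looked-up TCE value is returned
 * to the guest in r4 (vcpu->arch.gpr[4]) and the status, H_SUCCESS
 * here, in r3.
 */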

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */