/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace_pr.h"
#include "book3s.h"

#define PTE_SIZE 12

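/*
 * Drop the host HPT entry that backs a cached guest PTE, so the hardware
 * translation disappears along with the shadow mapping.
 */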
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
			       pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M,
			       false);
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

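/*
 * Look up the guest VSID in its two candidate slots (forward and mirrored
 * hash) of the sid_map; returns NULL if no valid mapping exists yet.
 */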
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
	return NULL;
}

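/*
 * Map a guest page into the host hash page table: resolve the guest real
 * address to a host pfn, find (or create) the shadow segment for the
 * effective address, then insert an HPTE and remember it in the hpte cache
 * so it can be invalidated later.
 */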
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
			bool iswrite)
{
	unsigned long vpn;
	pfn_t hpaddr;
	ulong hash, hpteg;
	u64 vsid;
	int ret;
	int rflags = 0x192;
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;
	int r = 0;
	int hpsize = MMU_PAGE_4K;
	bool writable;
	unsigned long mmu_seq;
	struct kvm *kvm = vcpu->kvm;
	struct hpte_cache *cpte;
	unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
	unsigned long pfn;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Get host physical address for gpa */
	pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
	if (is_error_noslot_pfn(pfn)) {
		printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
		       orig_pte->raddr);
		r = -EINVAL;
		goto out;
	}
	hpaddr = pfn << PAGE_SHIFT;

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
		WARN_ON(ret < 0);
		map = find_sid_vsid(vcpu, vsid);
	}
	if (!map) {
		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
		       vsid, orig_pte->eaddr);
		WARN_ON(true);
		r = -EINVAL;
		goto out;
	}

	vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);

	kvm_set_pfn_accessed(pfn);
	if (!orig_pte->may_write || !writable)
		rflags |= PP_RXRX;
	else {
		mark_page_dirty(vcpu->kvm, gfn);
		kvm_set_pfn_dirty(pfn);
	}

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;
	else
		kvmppc_mmu_flush_icache(pfn);

	/*
	 * Use 64K pages if possible; otherwise, on 64K page kernels,
	 * we need to transfer 4 more bits from guest real to host real addr.
	 */
	if (vsid & VSID_64K)
		hpsize = MMU_PAGE_64K;
	else
		hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

	hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);

	cpte = kvmppc_mmu_hpte_cache_next(vcpu);

	spin_lock(&kvm->mmu_lock);
	if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
		r = -EAGAIN;
		goto out_unlock;
	}

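	/*
	 * Try the primary PTE group first; if it is full, flip to the
	 * secondary group, and if that is full too evict an entry and retry.
	 */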
map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1)
		if (ppc_md.hpte_remove(hpteg) < 0) {
			r = -1;
			goto out_unlock;
		}

	ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
				 hpsize, hpsize, MMU_SEGSIZE_256M);

	if (ret == -1) {
		/* If we couldn't map a primary PTE, try a secondary */
		hash = ~hash;
		vflags ^= HPTE_V_SECONDARY;
		attempt++;
		goto map_again;
	} else if (ret < 0) {
		r = -EIO;
		goto out_unlock;
	} else {
		trace_kvm_book3s_64_mmu_map(rflags, hpteg,
					    vpn, hpaddr, orig_pte);

		/* The ppc_md code may give us a secondary entry even though we
		   asked for a primary. Fix up. */
		if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
			hash = ~hash;
			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
		}

		cpte->slot = hpteg + (ret & 7);
		cpte->host_vpn = vpn;
		cpte->pte = *orig_pte;
		cpte->pfn = pfn;
		cpte->pagesize = hpsize;

		kvmppc_mmu_hpte_cache_map(vcpu, cpte);
		cpte = NULL;
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	if (cpte)
		kvmppc_mmu_hpte_cache_free(cpte);

out:
	return r;
}

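/*
 * Flush the shadow PTEs for one guest page; with a 64K base page the mask
 * covers the whole 64K range rather than a single 4K vpage.
 */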
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	u64 mask = 0xfffffffffULL;
	u64 vsid;

	vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
	if (vsid & VSID_64K)
		mask = 0xffffffff0ULL;
	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
}

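/*
 * Allocate a new guest->host VSID mapping. If we have run out of proto
 * VSIDs, all shadow state is flushed and the proto-VSID space is reused
 * from the start.
 */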
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	/* Colliding guest VSIDs would keep evicting each other from the same
	 * slot, so alternate between the forward and the mirrored slot */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
		vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);

	map->guest_vsid = gvsid;
	map->valid = true;

	trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);

	return map;
}

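/*
 * Pick a shadow SLB slot for the given ESID: reuse a matching or invalid
 * entry if possible, otherwise take a fresh slot, flushing all segments
 * first when the shadow SLB is full.
 */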
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
	int r;

	/* Are we overwriting? */
	for (i = 0; i < svcpu->slb_max; i++) {
		if (!(svcpu->slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
			r = i;
			goto out;
		}
	}

	/* Found a spare entry that was invalidated before */
	if (found_inval >= 0) {
		r = found_inval;
		goto out;
	}

	/* No spare invalid entry, so create one */

	if (mmu_slb_size < 64)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
	if ((svcpu->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

	r = svcpu->slb_max;
	svcpu->slb_max++;

out:
	svcpu_put(svcpu);
	return r;
}

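/*
 * Enter a shadow SLB entry for the guest effective address, creating a
 * guest->host VSID mapping on demand.
 */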
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;
	int r = 0;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->slb[slb_index].esid = 0;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;

	slb_vsid |= (map->host_vsid << 12);
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

#ifdef CONFIG_PPC_64K_PAGES
	/* Set host segment base page size to 64K if possible */
	if (gvsid & VSID_64K)
		slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
#endif

	svcpu->slb[slb_index].esid = slb_esid;
	svcpu->slb[slb_index].vsid = slb_vsid;

	trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

out:
	svcpu_put(svcpu);
	return r;
}

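/*
 * Invalidate any shadow SLB entry that covers the given effective address
 * for the given segment size.
 */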
void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong seg_mask = -seg_size;
	int i;

	for (i = 0; i < svcpu->slb_max; i++) {
		if ((svcpu->slb[i].esid & SLB_ESID_V) &&
		    (svcpu->slb[i].esid & seg_mask) == ea) {
			/* Invalidate this entry */
			svcpu->slb[i].esid = 0;
		}
	}

	svcpu_put(svcpu);
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->slb_max = 0;
	svcpu->slb[0].esid = 0;
	svcpu_put(svcpu);
}

void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_hpte_destroy(vcpu);
	__destroy_context(to_book3s(vcpu)->context_id[0]);
}

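/*
 * Set up the per-vcpu shadow MMU state: allocate a host MMU context and
 * derive the range of proto VSIDs this vcpu may hand out for guest
 * segments.
 */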
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;

	err = __init_new_context();
	if (err < 0)
		return -1;
	vcpu3s->context_id[0] = err;

	vcpu3s->proto_vsid_max = ((u64)(vcpu3s->context_id[0] + 1)
				  << ESID_BITS) - 1;
	vcpu3s->proto_vsid_first = (u64)vcpu3s->context_id[0] << ESID_BITS;
	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;

	kvmppc_mmu_hpte_init(vcpu);

	return 0;
}