// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corporation, 2018
 * Authors Suraj Jitindar Singh <sjitindarsingh@gmail.com>
 *	   Paul Mackerras <paulus@ozlabs.org>
 *
 * Description: KVM functions specific to running nested KVM-HV guests
 * on Book3S processors (specifically POWER9 and later).
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/llist.h>
#include <linux/pgtable.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/reg.h>
#include <asm/plpar_wrappers.h>
#include <asm/firmware.h>

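/*
 * Partition table used when we ourselves run as a nested hypervisor on
 * pseries (nested v1 API): allocated in kvmhv_nested_init() and
 * registered with the L0 via H_SET_PARTITION_TABLE.
 */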
static struct patb_entry *pseries_partition_tb;

static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free);

void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	hr->pcr = vc->pcr | PCR_MASK;
	hr->dpdes = vcpu->arch.doorbell_request;
	hr->hfscr = vcpu->arch.hfscr;
	hr->tb_offset = vc->tb_offset;
	hr->dawr0 = vcpu->arch.dawr0;
	hr->dawrx0 = vcpu->arch.dawrx0;
	hr->ciabr = vcpu->arch.ciabr;
	hr->purr = vcpu->arch.purr;
	hr->spurr = vcpu->arch.spurr;
	hr->ic = vcpu->arch.ic;
	hr->vtb = vc->vtb;
	hr->srr0 = vcpu->arch.shregs.srr0;
	hr->srr1 = vcpu->arch.shregs.srr1;
	hr->sprg[0] = vcpu->arch.shregs.sprg0;
	hr->sprg[1] = vcpu->arch.shregs.sprg1;
	hr->sprg[2] = vcpu->arch.shregs.sprg2;
	hr->sprg[3] = vcpu->arch.shregs.sprg3;
	hr->pidr = vcpu->arch.pid;
	hr->cfar = vcpu->arch.cfar;
	hr->ppr = vcpu->arch.ppr;
	hr->dawr1 = vcpu->arch.dawr1;
	hr->dawrx1 = vcpu->arch.dawrx1;
}

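/*
 * The byteswap helpers below are needed when the L1 guest runs with the
 * opposite endianness to the host; see the kvmppc_need_byteswap() checks
 * in kvmhv_enter_nested_guest().
 */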
/* Use noinline_for_stack due to https://llvm.org/pr49610 */
static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
{
	unsigned long *addr = (unsigned long *) regs;

	for (; addr < ((unsigned long *) (regs + 1)); addr++)
		*addr = swab64(*addr);
}

static void byteswap_hv_regs(struct hv_guest_state *hr)
{
	hr->version = swab64(hr->version);
	hr->lpid = swab32(hr->lpid);
	hr->vcpu_token = swab32(hr->vcpu_token);
	hr->lpcr = swab64(hr->lpcr);
	hr->pcr = swab64(hr->pcr) | PCR_MASK;
	hr->amor = swab64(hr->amor);
	hr->dpdes = swab64(hr->dpdes);
	hr->hfscr = swab64(hr->hfscr);
	hr->tb_offset = swab64(hr->tb_offset);
	hr->dawr0 = swab64(hr->dawr0);
	hr->dawrx0 = swab64(hr->dawrx0);
	hr->ciabr = swab64(hr->ciabr);
	hr->hdec_expiry = swab64(hr->hdec_expiry);
	hr->purr = swab64(hr->purr);
	hr->spurr = swab64(hr->spurr);
	hr->ic = swab64(hr->ic);
	hr->vtb = swab64(hr->vtb);
	hr->hdar = swab64(hr->hdar);
	hr->hdsisr = swab64(hr->hdsisr);
	hr->heir = swab64(hr->heir);
	hr->asdr = swab64(hr->asdr);
	hr->srr0 = swab64(hr->srr0);
	hr->srr1 = swab64(hr->srr1);
	hr->sprg[0] = swab64(hr->sprg[0]);
	hr->sprg[1] = swab64(hr->sprg[1]);
	hr->sprg[2] = swab64(hr->sprg[2]);
	hr->sprg[3] = swab64(hr->sprg[3]);
	hr->pidr = swab64(hr->pidr);
	hr->cfar = swab64(hr->cfar);
	hr->ppr = swab64(hr->ppr);
	hr->dawr1 = swab64(hr->dawr1);
	hr->dawrx1 = swab64(hr->dawrx1);
}

static void save_hv_return_state(struct kvm_vcpu *vcpu,
				 struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	hr->dpdes = vcpu->arch.doorbell_request;
	hr->purr = vcpu->arch.purr;
	hr->spurr = vcpu->arch.spurr;
	hr->ic = vcpu->arch.ic;
	hr->vtb = vc->vtb;
	hr->srr0 = vcpu->arch.shregs.srr0;
	hr->srr1 = vcpu->arch.shregs.srr1;
	hr->sprg[0] = vcpu->arch.shregs.sprg0;
	hr->sprg[1] = vcpu->arch.shregs.sprg1;
	hr->sprg[2] = vcpu->arch.shregs.sprg2;
	hr->sprg[3] = vcpu->arch.shregs.sprg3;
	hr->pidr = vcpu->arch.pid;
	hr->cfar = vcpu->arch.cfar;
	hr->ppr = vcpu->arch.ppr;
	switch (vcpu->arch.trap) {
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		hr->hdar = vcpu->arch.fault_dar;
		hr->hdsisr = vcpu->arch.fault_dsisr;
		hr->asdr = vcpu->arch.fault_gpa;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		hr->asdr = vcpu->arch.fault_gpa;
		break;
	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
		hr->hfscr = ((~HFSCR_INTR_CAUSE & hr->hfscr) |
			     (HFSCR_INTR_CAUSE & vcpu->arch.hfscr));
		break;
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		hr->heir = vcpu->arch.emul_inst;
		break;
	}
}

static void restore_hv_regs(struct kvm_vcpu *vcpu, const struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	vc->pcr = hr->pcr | PCR_MASK;
	vcpu->arch.doorbell_request = hr->dpdes;
	vcpu->arch.hfscr = hr->hfscr;
	vcpu->arch.dawr0 = hr->dawr0;
	vcpu->arch.dawrx0 = hr->dawrx0;
	vcpu->arch.ciabr = hr->ciabr;
	vcpu->arch.purr = hr->purr;
	vcpu->arch.spurr = hr->spurr;
	vcpu->arch.ic = hr->ic;
	vc->vtb = hr->vtb;
	vcpu->arch.shregs.srr0 = hr->srr0;
	vcpu->arch.shregs.srr1 = hr->srr1;
	vcpu->arch.shregs.sprg0 = hr->sprg[0];
	vcpu->arch.shregs.sprg1 = hr->sprg[1];
	vcpu->arch.shregs.sprg2 = hr->sprg[2];
	vcpu->arch.shregs.sprg3 = hr->sprg[3];
	vcpu->arch.pid = hr->pidr;
	vcpu->arch.cfar = hr->cfar;
	vcpu->arch.ppr = hr->ppr;
	vcpu->arch.dawr1 = hr->dawr1;
	vcpu->arch.dawrx1 = hr->dawrx1;
}

void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	/*
	 * This L2 vCPU might have received a doorbell while H_ENTER_NESTED was being handled.
	 * Make sure we preserve the doorbell if it was either:
	 *   a) Sent after H_ENTER_NESTED was called on this vCPU (arch.doorbell_request would be 1)
	 *   b) Doorbell was not handled and L2 exited for some other reason (hr->dpdes would be 1)
	 */
	vcpu->arch.doorbell_request = vcpu->arch.doorbell_request | hr->dpdes;
	vcpu->arch.hfscr = hr->hfscr;
	vcpu->arch.purr = hr->purr;
	vcpu->arch.spurr = hr->spurr;
	vcpu->arch.ic = hr->ic;
	vc->vtb = hr->vtb;
	vcpu->arch.fault_dar = hr->hdar;
	vcpu->arch.fault_dsisr = hr->hdsisr;
	vcpu->arch.fault_gpa = hr->asdr;
	vcpu->arch.emul_inst = hr->heir;
	vcpu->arch.shregs.srr0 = hr->srr0;
	vcpu->arch.shregs.srr1 = hr->srr1;
	vcpu->arch.shregs.sprg0 = hr->sprg[0];
	vcpu->arch.shregs.sprg1 = hr->sprg[1];
	vcpu->arch.shregs.sprg2 = hr->sprg[2];
	vcpu->arch.shregs.sprg3 = hr->sprg[3];
	vcpu->arch.pid = hr->pidr;
	vcpu->arch.cfar = hr->cfar;
	vcpu->arch.ppr = hr->ppr;
}

static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
{
	/* No need to reflect the page fault to L1, we've handled it */
	vcpu->arch.trap = 0;

	/*
	 * Since the L2 gprs have already been written back into L1 memory by
	 * the time we complete the mmio, store the L1 memory location of the
	 * L2 gpr that the mmio is loading into, so that the loaded value can
	 * be written there by kvmppc_complete_mmio_load().
	 */
	if (((vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) == KVM_MMIO_REG_GPR)
	    && (vcpu->mmio_is_write == 0)) {
		vcpu->arch.nested_io_gpr = (gpa_t) regs_ptr +
					   offsetof(struct pt_regs,
						    gpr[vcpu->arch.io_gpr]);
		vcpu->arch.io_gpr = KVM_MMIO_REG_NESTED_GPR;
	}
}

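/*
 * Read the hv_guest_state version field first so that we know how much
 * of the structure to copy in: hv_guest_state_size() gives the size
 * corresponding to the version the L1 guest is using.
 */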
static int kvmhv_read_guest_state_and_regs(struct kvm_vcpu *vcpu,
					   struct hv_guest_state *l2_hv,
					   struct pt_regs *l2_regs,
					   u64 hv_ptr, u64 regs_ptr)
{
	int size;

	if (kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv->version,
				sizeof(l2_hv->version)))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		l2_hv->version = swab64(l2_hv->version);

	size = hv_guest_state_size(l2_hv->version);
	if (size < 0)
		return -1;

	return kvm_vcpu_read_guest(vcpu, hv_ptr, l2_hv, size) ||
		kvm_vcpu_read_guest(vcpu, regs_ptr, l2_regs,
				    sizeof(struct pt_regs));
}

static int kvmhv_write_guest_state_and_regs(struct kvm_vcpu *vcpu,
					    struct hv_guest_state *l2_hv,
					    struct pt_regs *l2_regs,
					    u64 hv_ptr, u64 regs_ptr)
{
	int size;

	size = hv_guest_state_size(l2_hv->version);
	if (size < 0)
		return -1;

	return kvm_vcpu_write_guest(vcpu, hv_ptr, l2_hv, size) ||
		kvm_vcpu_write_guest(vcpu, regs_ptr, l2_regs,
				     sizeof(struct pt_regs));
}

static void load_l2_hv_regs(struct kvm_vcpu *vcpu,
			    const struct hv_guest_state *l2_hv,
			    const struct hv_guest_state *l1_hv, u64 *lpcr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

	restore_hv_regs(vcpu, l2_hv);

	/*
	 * Don't let L1 change LPCR bits for the L2 except these:
	 */
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | LPCR_MER;

	/*
	 * Additional filtering is required depending on hardware
	 * and configuration.
	 */
	*lpcr = kvmppc_filter_lpcr_hv(vcpu->kvm,
				      (vc->lpcr & ~mask) | (*lpcr & mask));

	/*
	 * Don't let L1 enable features for L2 which we don't allow for L1,
	 * but preserve the interrupt cause field.
	 */
	vcpu->arch.hfscr = l2_hv->hfscr & (HFSCR_INTR_CAUSE | vcpu->arch.hfscr_permitted);

	/* Don't let data address watchpoint match in hypervisor state */
	vcpu->arch.dawrx0 = l2_hv->dawrx0 & ~DAWRX_HYP;
	vcpu->arch.dawrx1 = l2_hv->dawrx1 & ~DAWRX_HYP;

	/* Don't let completed instruction address breakpt match in HV state */
	if ((l2_hv->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
		vcpu->arch.ciabr = l2_hv->ciabr & ~CIABR_PRIV;
}

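/*
 * Handle the H_ENTER_NESTED hcall.
 * r4 = L1 guest address of a hv_guest_state structure
 * r5 = L1 guest address of a pt_regs structure holding the L2 GPRs
 * Returns the L2 trap number on a normal exit from the L2, or a
 * hypervisor error code on failure.
 */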
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
{
	long int err, r;
	struct kvm_nested_guest *l2;
	struct pt_regs l2_regs, saved_l1_regs;
	struct hv_guest_state l2_hv = {0}, saved_l1_hv;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 hv_ptr, regs_ptr;
	u64 hdec_exp, lpcr;
	s64 delta_purr, delta_spurr, delta_ic, delta_vtb;

	if (vcpu->kvm->arch.l1_ptcr == 0)
		return H_NOT_AVAILABLE;

	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
		return H_BAD_MODE;

	/* copy parameters in */
	hv_ptr = kvmppc_get_gpr(vcpu, 4);
	regs_ptr = kvmppc_get_gpr(vcpu, 5);
	kvm_vcpu_srcu_read_lock(vcpu);
	err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
					      hv_ptr, regs_ptr);
	kvm_vcpu_srcu_read_unlock(vcpu);
	if (err)
		return H_PARAMETER;

	if (kvmppc_need_byteswap(vcpu))
		byteswap_hv_regs(&l2_hv);
	if (l2_hv.version > HV_GUEST_STATE_VERSION)
		return H_P2;

	if (kvmppc_need_byteswap(vcpu))
		byteswap_pt_regs(&l2_regs);
	if (l2_hv.vcpu_token >= NR_CPUS)
		return H_PARAMETER;

	/*
	 * The L2 may enter in a transactional state only if L1 has set up
	 * a suspended state, and only in that case. Other combinations
	 * have to be filtered out here to prevent causing a TM Bad Thing
	 * in the host HRFID. We could synthesize a TM Bad Thing back to
	 * the L1 here but there doesn't seem to be much point.
	 */
	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) {
		if (!MSR_TM_ACTIVE(l2_regs.msr))
			return H_BAD_MODE;
	} else {
		if (l2_regs.msr & MSR_TS_MASK)
			return H_BAD_MODE;
		if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
			return H_BAD_MODE;
	}

	/* translate lpid */
	l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
	if (!l2)
		return H_PARAMETER;
	if (!l2->l1_gr_to_hr) {
		mutex_lock(&l2->tlb_lock);
		kvmhv_update_ptbl_cache(l2);
		mutex_unlock(&l2->tlb_lock);
	}

	/* save l1 values of things */
	vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
	saved_l1_regs = vcpu->arch.regs;
	kvmhv_save_hv_regs(vcpu, &saved_l1_hv);

	/* convert TB values/offsets to host (L0) values */
	hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
	vc->tb_offset += l2_hv.tb_offset;
	vcpu->arch.dec_expires += l2_hv.tb_offset;

	/* set L1 state to L2 state */
	vcpu->arch.nested = l2;
	vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
	vcpu->arch.nested_hfscr = l2_hv.hfscr;
	vcpu->arch.regs = l2_regs;

	/* Guest must always run with ME enabled, HV disabled. */
	vcpu->arch.shregs.msr = (vcpu->arch.regs.msr | MSR_ME) & ~MSR_HV;

	lpcr = l2_hv.lpcr;
	load_l2_hv_regs(vcpu, &l2_hv, &saved_l1_hv, &lpcr);

	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	do {
		r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
	} while (is_kvmppc_resume_guest(r));

	/* save L2 state for return */
	l2_regs = vcpu->arch.regs;
	l2_regs.msr = vcpu->arch.shregs.msr;
	delta_purr = vcpu->arch.purr - l2_hv.purr;
	delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
	delta_ic = vcpu->arch.ic - l2_hv.ic;
	delta_vtb = vc->vtb - l2_hv.vtb;
	save_hv_return_state(vcpu, &l2_hv);

	/* restore L1 state */
	vcpu->arch.nested = NULL;
	vcpu->arch.regs = saved_l1_regs;
	vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK;
	/* set L1 MSR TS field according to L2 transaction state */
	if (l2_regs.msr & MSR_TS_MASK)
		vcpu->arch.shregs.msr |= MSR_TS_S;
	vc->tb_offset = saved_l1_hv.tb_offset;
	/* XXX: is this always the same delta as saved_l1_hv.tb_offset? */
	vcpu->arch.dec_expires -= l2_hv.tb_offset;
	restore_hv_regs(vcpu, &saved_l1_hv);
	vcpu->arch.purr += delta_purr;
	vcpu->arch.spurr += delta_spurr;
	vcpu->arch.ic += delta_ic;
	vc->vtb += delta_vtb;

	kvmhv_put_nested(l2);

	/* copy l2_hv_state and regs back to guest */
	if (kvmppc_need_byteswap(vcpu)) {
		byteswap_hv_regs(&l2_hv);
		byteswap_pt_regs(&l2_regs);
	}
	kvm_vcpu_srcu_read_lock(vcpu);
	err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
					       hv_ptr, regs_ptr);
	kvm_vcpu_srcu_read_unlock(vcpu);
	if (err)
		return H_AUTHORITY;

	if (r == -EINTR)
		return H_INTERRUPT;

	if (vcpu->mmio_needed) {
		kvmhv_nested_mmio_needed(vcpu, regs_ptr);
		return H_TOO_HARD;
	}

	return vcpu->arch.trap;
}

unsigned long nested_capabilities;

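/*
 * Probe for nested hypervisor support: first try the nested v2
 * guest-state API via plpar_guest_get_capabilities(); if that is not
 * available, fall back to the nested v1 scheme of registering a
 * partition table with the L0 via H_SET_PARTITION_TABLE.
 */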
long kvmhv_nested_init(void)
{
	long int ptb_order;
	unsigned long ptcr, host_capabilities;
	long rc;

	if (!kvmhv_on_pseries())
		return 0;
	if (!radix_enabled())
		return -ENODEV;

	rc = plpar_guest_get_capabilities(0, &host_capabilities);
	if (rc == H_SUCCESS) {
		unsigned long capabilities = 0;

		if (cpu_has_feature(CPU_FTR_ARCH_31))
			capabilities |= H_GUEST_CAP_POWER10;
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			capabilities |= H_GUEST_CAP_POWER9;

		nested_capabilities = capabilities & host_capabilities;
		rc = plpar_guest_set_capabilities(0, nested_capabilities);
		if (rc != H_SUCCESS) {
			pr_err("kvm-hv: Could not configure parent hypervisor capabilities (rc=%ld)\n",
			       rc);
			return -ENODEV;
		}

		static_branch_enable(&__kvmhv_is_nestedv2);
		return 0;
	}

	pr_info("kvm-hv: nestedv2 get capabilities hcall failed, falling back to nestedv1 (rc=%ld)\n",
		rc);
	/* Partition table entry is 1<<4 bytes in size, hence the 4. */
	ptb_order = KVM_MAX_NESTED_GUESTS_SHIFT + 4;
	/* Minimum partition table size is 1<<12 bytes */
	if (ptb_order < 12)
		ptb_order = 12;
	pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
				       GFP_KERNEL);
	if (!pseries_partition_tb) {
		pr_err("kvm-hv: failed to allocate nested partition table\n");
		return -ENOMEM;
	}

	ptcr = __pa(pseries_partition_tb) | (ptb_order - 12);
	rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
	if (rc != H_SUCCESS) {
		pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
		       rc);
		kfree(pseries_partition_tb);
		pseries_partition_tb = NULL;
		return -ENODEV;
	}

	return 0;
}

void kvmhv_nested_exit(void)
{
	/*
	 * N.B. the kvmhv_on_pseries() test is there because it enables
	 * the compiler to remove the call to plpar_hcall_norets()
	 * when CONFIG_PPC_PSERIES=n.
	 */
	if (kvmhv_on_pseries() && pseries_partition_tb) {
		plpar_hcall_norets(H_SET_PARTITION_TABLE, 0);
		kfree(pseries_partition_tb);
		pseries_partition_tb = NULL;
	}
}

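/*
 * Invalidate all translations for an lpid: directly when running bare
 * metal, or by asking the L0 hypervisor when running nested, using
 * H_RPT_INVALIDATE when the firmware supports it and H_TLB_INVALIDATE
 * otherwise.
 */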
void kvmhv_flush_lpid(u64 lpid)
{
	long rc;

	if (!kvmhv_on_pseries()) {
		radix__flush_all_lpid(lpid);
		return;
	}

	if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
		rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
					lpid, TLBIEL_INVAL_SET_LPID);
	else
		rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
					    H_RPTI_TYPE_NESTED |
					    H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
					    H_RPTI_TYPE_PAT,
					    H_RPTI_PAGE_ALL, 0, -1UL);
	if (rc)
		pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}

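/*
 * Set a partition table entry for the given lpid: written directly into
 * the hardware partition table on bare metal, cached in
 * pseries_partition_tb and pushed to the L0 when we are a nested v1
 * hypervisor, or communicated through the nested v2 API otherwise.
 */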
void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1)
{
	if (!kvmhv_on_pseries()) {
		mmu_partition_table_set_entry(lpid, dw0, dw1, true);
		return;
	}

	if (kvmhv_is_nestedv1()) {
		pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
		pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
		/* L0 will do the necessary barriers */
		kvmhv_flush_lpid(lpid);
	}

	if (kvmhv_is_nestedv2())
		kvmhv_nestedv2_set_ptbl_entry(lpid, dw0, dw1);
}

static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
{
	unsigned long dw0;

	dw0 = PATB_HR | radix__get_tree_size() |
		__pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE;
	kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
}

/*
 * Handle the H_SET_PARTITION_TABLE hcall.
 * r4 = guest real address of partition table + log_2(size) - 12
 * (formatted as for the PTCR).
 */
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long ptcr = kvmppc_get_gpr(vcpu, 4);
	int srcu_idx;
	long ret = H_SUCCESS;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	/* Check partition size and base address. */
	if ((ptcr & PRTS_MASK) + 12 - 4 > KVM_MAX_NESTED_GUESTS_SHIFT ||
	    !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
		ret = H_PARAMETER;
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	if (ret == H_SUCCESS)
		kvm->arch.l1_ptcr = ptcr;

	return ret;
}

/*
 * Handle the H_COPY_TOFROM_GUEST hcall.
 * r4 = L1 lpid of nested guest
 * r5 = pid
 * r6 = eaddr to access
 * r7 = to buffer (L1 gpa)
 * r8 = from buffer (L1 gpa)
 * r9 = n bytes to copy
 */
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *gp;
	int l1_lpid = kvmppc_get_gpr(vcpu, 4);
	int pid = kvmppc_get_gpr(vcpu, 5);
	gva_t eaddr = kvmppc_get_gpr(vcpu, 6);
	gpa_t gp_to = (gpa_t) kvmppc_get_gpr(vcpu, 7);
	gpa_t gp_from = (gpa_t) kvmppc_get_gpr(vcpu, 8);
	void *buf;
	unsigned long n = kvmppc_get_gpr(vcpu, 9);
	bool is_load = !!gp_to;
	long rc;

	if (gp_to && gp_from) /* One must be NULL to determine the direction */
		return H_PARAMETER;

	if (eaddr & (0xFFFUL << 52))
		return H_PARAMETER;

	buf = kzalloc(n, GFP_KERNEL | __GFP_NOWARN);
	if (!buf)
		return H_NO_MEM;

	gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
	if (!gp) {
		rc = H_PARAMETER;
		goto out_free;
	}

	mutex_lock(&gp->tlb_lock);

	if (is_load) {
		/* Load from the nested guest into our buffer */
		rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
						     eaddr, buf, NULL, n);
		if (rc)
			goto not_found;

		/* Write what was loaded into our buffer back to the L1 guest */
		kvm_vcpu_srcu_read_lock(vcpu);
		rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
		kvm_vcpu_srcu_read_unlock(vcpu);
		if (rc)
			goto not_found;
	} else {
		/* Load the data to be stored from the L1 guest into our buf */
		kvm_vcpu_srcu_read_lock(vcpu);
		rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
		kvm_vcpu_srcu_read_unlock(vcpu);
		if (rc)
			goto not_found;

		/* Store from our buffer into the nested guest */
		rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
						     eaddr, NULL, buf, n);
		if (rc)
			goto not_found;
	}

out_unlock:
	mutex_unlock(&gp->tlb_lock);
	kvmhv_put_nested(gp);
out_free:
	kfree(buf);
	return rc;
not_found:
	rc = H_NOT_FOUND;
	goto out_unlock;
}

/*
 * Reload the partition table entry for a guest.
 * Caller must hold gp->tlb_lock.
 */
static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
{
	int ret;
	struct patb_entry ptbl_entry;
	unsigned long ptbl_addr;
	struct kvm *kvm = gp->l1_host;

	ret = -EFAULT;
	ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
	if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) {
		int srcu_idx = srcu_read_lock(&kvm->srcu);
		ret = kvm_read_guest(kvm, ptbl_addr,
				     &ptbl_entry, sizeof(ptbl_entry));
		srcu_read_unlock(&kvm->srcu, srcu_idx);
	}
	if (ret) {
		gp->l1_gr_to_hr = 0;
		gp->process_table = 0;
	} else {
		gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0);
		gp->process_table = be64_to_cpu(ptbl_entry.patb1);
	}
	kvmhv_set_nested_ptbl(gp);
}

void kvmhv_vm_nested_init(struct kvm *kvm)
{
	idr_init(&kvm->arch.kvm_nested_guest_idr);
}

static struct kvm_nested_guest *__find_nested(struct kvm *kvm, int lpid)
{
	return idr_find(&kvm->arch.kvm_nested_guest_idr, lpid);
}

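/*
 * The idr slot for an lpid is populated in two phases: __prealloc_nested()
 * reserves the slot with a NULL entry outside the mmu_lock (idr_alloc()
 * with GFP_KERNEL may sleep), and __add_nested() then installs the real
 * pointer under the mmu_lock using idr_replace(), which does not allocate.
 */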
static bool __prealloc_nested(struct kvm *kvm, int lpid)
{
	if (idr_alloc(&kvm->arch.kvm_nested_guest_idr,
				NULL, lpid, lpid + 1, GFP_KERNEL) != lpid)
		return false;
	return true;
}

static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp)
{
	if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid))
		WARN_ON(1);
}

static void __remove_nested(struct kvm *kvm, int lpid)
{
	idr_remove(&kvm->arch.kvm_nested_guest_idr, lpid);
}

static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
{
	struct kvm_nested_guest *gp;
	long shadow_lpid;

	gp = kzalloc(sizeof(*gp), GFP_KERNEL);
	if (!gp)
		return NULL;
	gp->l1_host = kvm;
	gp->l1_lpid = lpid;
	mutex_init(&gp->tlb_lock);
	gp->shadow_pgtable = pgd_alloc(kvm->mm);
	if (!gp->shadow_pgtable)
		goto out_free;
	shadow_lpid = kvmppc_alloc_lpid();
	if (shadow_lpid < 0)
		goto out_free2;
	gp->shadow_lpid = shadow_lpid;
	gp->radix = 1;

	memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));

	return gp;

 out_free2:
	pgd_free(kvm->mm, gp->shadow_pgtable);
 out_free:
	kfree(gp);
	return NULL;
}

/*
 * Free up any resources allocated for a nested guest.
 */
static void kvmhv_release_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	if (gp->shadow_pgtable) {
		/*
		 * No vcpu is using this struct and no call to
		 * kvmhv_get_nested can find this struct,
		 * so we don't need to hold kvm->mmu_lock.
		 */
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		pgd_free(kvm->mm, gp->shadow_pgtable);
	}
	kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0);
	kvmppc_free_lpid(gp->shadow_lpid);
	kfree(gp);
}

static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	int lpid = gp->l1_lpid;
	long ref;

	spin_lock(&kvm->mmu_lock);
	if (gp == __find_nested(kvm, lpid)) {
		__remove_nested(kvm, lpid);
		--gp->refcnt;
	}
	ref = gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}

/*
 * Free up all nested resources allocated for this guest.
 * This is called with no vcpus of the guest running, when
 * switching the guest to HPT mode or when destroying the
 * guest.
 */
void kvmhv_release_all_nested(struct kvm *kvm)
{
	int lpid;
	struct kvm_nested_guest *gp;
	struct kvm_nested_guest *freelist = NULL;
	struct kvm_memory_slot *memslot;
	int srcu_idx, bkt;

	spin_lock(&kvm->mmu_lock);
	idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
		__remove_nested(kvm, lpid);
		if (--gp->refcnt == 0) {
			gp->next = freelist;
			freelist = gp;
		}
	}
	idr_destroy(&kvm->arch.kvm_nested_guest_idr);
	/* idr is empty and may be reused at this point */
	spin_unlock(&kvm->mmu_lock);
	while ((gp = freelist) != NULL) {
		freelist = gp->next;
		kvmhv_release_nested(gp);
	}

	srcu_idx = srcu_read_lock(&kvm->srcu);
	kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
		kvmhv_free_memslot_nest_rmap(memslot);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

/* caller must hold gp->tlb_lock */
static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	spin_lock(&kvm->mmu_lock);
	kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
	spin_unlock(&kvm->mmu_lock);
	kvmhv_flush_lpid(gp->shadow_lpid);
	kvmhv_update_ptbl_cache(gp);
	if (gp->l1_gr_to_hr == 0)
		kvmhv_remove_nested(gp);
}

struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
					  bool create)
{
	struct kvm_nested_guest *gp, *newgp;

	if (l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
		return NULL;

	spin_lock(&kvm->mmu_lock);
	gp = __find_nested(kvm, l1_lpid);
	if (gp)
		++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (gp || !create)
		return gp;

	newgp = kvmhv_alloc_nested(kvm, l1_lpid);
	if (!newgp)
		return NULL;

	if (!__prealloc_nested(kvm, l1_lpid)) {
		kvmhv_release_nested(newgp);
		return NULL;
	}

	spin_lock(&kvm->mmu_lock);
	gp = __find_nested(kvm, l1_lpid);
	if (!gp) {
		__add_nested(kvm, l1_lpid, newgp);
		++newgp->refcnt;
		gp = newgp;
		newgp = NULL;
	}
	++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (newgp)
		kvmhv_release_nested(newgp);

	return gp;
}

void kvmhv_put_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	long ref;

	spin_lock(&kvm->mmu_lock);
	ref = --gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}

pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
				 unsigned long ea, unsigned *hshift)
{
	struct kvm_nested_guest *gp;
	pte_t *pte;

	gp = __find_nested(kvm, lpid);
	if (!gp)
		return NULL;

	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
		"%s called with kvm mmu_lock not held\n", __func__);
	pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);

	return pte;
}

static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
{
	return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
				       RMAP_NESTED_GPA_MASK));
}

void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
			    struct rmap_nested **n_rmap)
{
	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
	struct rmap_nested *cursor;
	u64 rmap, new_rmap = (*n_rmap)->rmap;

	/* Are there any existing entries? */
	if (!(*rmapp)) {
		/* No -> use the rmap as a single entry */
		*rmapp = new_rmap | RMAP_NESTED_IS_SINGLE_ENTRY;
		return;
	}

	/* Do any entries match what we're trying to insert? */
	for_each_nest_rmap_safe(cursor, entry, &rmap) {
		if (kvmhv_n_rmap_is_equal(rmap, new_rmap))
			return;
	}

	/* Do we need to create a list or just add the new entry? */
	rmap = *rmapp;
	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
		*rmapp = 0UL;
	llist_add(&((*n_rmap)->list), (struct llist_head *) rmapp);
	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
		(*n_rmap)->list.next = (struct llist_node *) rmap;

	/* Set NULL so not freed by caller */
	*n_rmap = NULL;
}

static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
				      unsigned long clr, unsigned long set,
				      unsigned long hpa, unsigned long mask)
{
	unsigned long gpa;
	unsigned int shift, lpid;
	pte_t *ptep;

	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;

	/* Find the pte */
	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	/*
	 * If the pte is present and the pfn is still the same, update the pte.
	 * If the pfn has changed then this is a stale rmap entry, the nested
	 * gpa actually points somewhere else now, and there is nothing to do.
	 * XXX A future optimisation would be to remove the rmap entry here.
	 */
	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) {
		__radix_pte_update(ptep, clr, set);
		kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
	}
}

/*
 * For a given list of rmap entries, update the rc bits in all ptes in shadow
 * page tables for nested guests which are referenced by the rmap list.
 */
void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
				    unsigned long clr, unsigned long set,
				    unsigned long hpa, unsigned long nbytes)
{
	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
	struct rmap_nested *cursor;
	unsigned long rmap, mask;

	if ((clr | set) & ~(_PAGE_DIRTY | _PAGE_ACCESSED))
		return;

	mask = PTE_RPN_MASK & ~(nbytes - 1);
	hpa &= mask;

	for_each_nest_rmap_safe(cursor, entry, &rmap)
		kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask);
}

static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
				   unsigned long hpa, unsigned long mask)
{
	struct kvm_nested_guest *gp;
	unsigned long gpa;
	unsigned int shift, lpid;
	pte_t *ptep;

	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
	gp = __find_nested(kvm, lpid);
	if (!gp)
		return;

	/* Find and invalidate the pte */
	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	/* Don't spuriously invalidate ptes if the pfn has changed */
	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
}

static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
					unsigned long hpa, unsigned long mask)
{
	struct llist_node *entry = llist_del_all((struct llist_head *) rmapp);
	struct rmap_nested *cursor;
	unsigned long rmap;

	for_each_nest_rmap_safe(cursor, entry, &rmap) {
		kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);
		kfree(cursor);
	}
}

/* called with kvm->mmu_lock held */
void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
				  const struct kvm_memory_slot *memslot,
				  unsigned long gpa, unsigned long hpa,
				  unsigned long nbytes)
{
	unsigned long gfn, end_gfn;
	unsigned long addr_mask;

	if (!memslot)
		return;
	gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
	end_gfn = gfn + (nbytes >> PAGE_SHIFT);

	addr_mask = PTE_RPN_MASK & ~(nbytes - 1);
	hpa &= addr_mask;

	for (; gfn < end_gfn; gfn++) {
		unsigned long *rmap = &memslot->arch.rmap[gfn];
		kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
	}
}

static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free)
{
	unsigned long page;

	for (page = 0; page < free->npages; page++) {
		unsigned long rmap, *rmapp = &free->arch.rmap[page];
		struct rmap_nested *cursor;
		struct llist_node *entry;

		entry = llist_del_all((struct llist_head *) rmapp);
		for_each_nest_rmap_safe(cursor, entry, &rmap)
			kfree(cursor);
	}
}

static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
					struct kvm_nested_guest *gp,
					long gpa, int *shift_ret)
{
	struct kvm *kvm = vcpu->kvm;
	bool ret = false;
	pte_t *ptep;
	int shift;

	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
	if (!shift)
		shift = PAGE_SHIFT;
	if (ptep && pte_present(*ptep)) {
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
		ret = true;
	}
	spin_unlock(&kvm->mmu_lock);

	if (shift_ret)
		*shift_ret = shift;
	return ret;
}

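/*
 * Helpers to decode the fields of a tlbie instruction image (RIC, PRS, R)
 * and of its RS/RB operand values (LPID, IS, AP, EPN), as passed to the
 * H_TLB_INVALIDATE hcall handled below.
 */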
static inline int get_ric(unsigned int instr)
{
	return (instr >> 18) & 0x3;
}

static inline int get_prs(unsigned int instr)
{
	return (instr >> 17) & 0x1;
}

static inline int get_r(unsigned int instr)
{
	return (instr >> 16) & 0x1;
}

static inline int get_lpid(unsigned long r_val)
{
	return r_val & 0xffffffff;
}

static inline int get_is(unsigned long r_val)
{
	return (r_val >> 10) & 0x3;
}

static inline int get_ap(unsigned long r_val)
{
	return (r_val >> 5) & 0x7;
}

static inline long get_epn(unsigned long r_val)
{
	return r_val >> 12;
}

static int kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid,
					int ap, long epn)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	long npages;
	int shift, shadow_shift;
	unsigned long addr;

	shift = ap_to_shift(ap);
	addr = epn << 12;
	if (shift < 0)
		/* Invalid ap encoding */
		return -EINVAL;

	addr &= ~((1UL << shift) - 1);
	npages = 1UL << (shift - PAGE_SHIFT);

	gp = kvmhv_get_nested(kvm, lpid, false);
	if (!gp) /* No such guest -> nothing to do */
		return 0;
	mutex_lock(&gp->tlb_lock);

	/* There may be more than one host page backing this single guest pte */
	do {
		kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift);

		npages -= 1UL << (shadow_shift - PAGE_SHIFT);
		addr += 1UL << shadow_shift;
	} while (npages > 0);

	mutex_unlock(&gp->tlb_lock);
	kvmhv_put_nested(gp);
	return 0;
}

static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
				     struct kvm_nested_guest *gp, int ric)
{
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&gp->tlb_lock);
	switch (ric) {
	case 0:
		/* Invalidate TLB */
		spin_lock(&kvm->mmu_lock);
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		kvmhv_flush_lpid(gp->shadow_lpid);
		spin_unlock(&kvm->mmu_lock);
		break;
	case 1:
		/*
		 * Invalidate PWC
		 * We don't cache this -> nothing to do
		 */
		break;
	case 2:
		/* Invalidate TLB, PWC and caching of partition table entries */
		kvmhv_flush_nested(gp);
		break;
	default:
		break;
	}
	mutex_unlock(&gp->tlb_lock);
}

static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	int lpid;

	spin_lock(&kvm->mmu_lock);
	idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
		spin_unlock(&kvm->mmu_lock);
		kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
		spin_lock(&kvm->mmu_lock);
	}
	spin_unlock(&kvm->mmu_lock);
}

static int kvmhv_emulate_priv_tlbie(struct kvm_vcpu *vcpu, unsigned int instr,
				    unsigned long rsval, unsigned long rbval)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	int r, ric, prs, is, ap;
	int lpid;
	long epn;
	int ret = 0;

	ric = get_ric(instr);
	prs = get_prs(instr);
	r = get_r(instr);
	lpid = get_lpid(rsval);
	is = get_is(rbval);

	/*
	 * These cases are invalid and are not handled:
	 * r   != 1 -> Only radix supported
	 * prs == 1 -> Not HV privileged
	 * ric == 3 -> No cluster bombs for radix
	 * is  == 1 -> Partition scoped translations not associated with pid
	 * (!is) && (ric == 1 || ric == 2) -> Not supported by ISA
	 */
	if ((!r) || (prs) || (ric == 3) || (is == 1) ||
	    ((!is) && (ric == 1 || ric == 2)))
		return -EINVAL;

	switch (is) {
	case 0:
		/*
		 * We know ric == 0
		 * Invalidate TLB for a given target address
		 */
		epn = get_epn(rbval);
		ap = get_ap(rbval);
		ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn);
		break;
	case 2:
		/* Invalidate matching LPID */
		gp = kvmhv_get_nested(kvm, lpid, false);
		if (gp) {
			kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
			kvmhv_put_nested(gp);
		}
		break;
	case 3:
		/* Invalidate ALL LPIDs */
		kvmhv_emulate_tlbie_all_lpid(vcpu, ric);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * This handles the H_TLB_INVALIDATE hcall.
 * Parameters are (r4) tlbie instruction code, (r5) rS contents,
 * (r6) rB contents.
 */
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = kvmhv_emulate_priv_tlbie(vcpu, kvmppc_get_gpr(vcpu, 4),
			kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6));
	if (ret)
		return H_PARAMETER;
	return H_SUCCESS;
}

static long do_tlb_invalidate_nested_all(struct kvm_vcpu *vcpu,
					 unsigned long lpid, unsigned long ric)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;

	gp = kvmhv_get_nested(kvm, lpid, false);
	if (gp) {
		kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
		kvmhv_put_nested(gp);
	}
	return H_SUCCESS;
}

/*
 * Number of pages above which we invalidate the entire LPID rather than
 * flush individual pages.
 */
static unsigned long tlb_range_flush_page_ceiling __read_mostly = 33;

static long do_tlb_invalidate_nested_tlb(struct kvm_vcpu *vcpu,
					 unsigned long lpid,
					 unsigned long pg_sizes,
					 unsigned long start,
					 unsigned long end)
{
	int ret = H_P4;
	unsigned long addr, nr_pages;
	struct mmu_psize_def *def;
	unsigned long psize, ap, page_size;
	bool flush_lpid;

	for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
		def = &mmu_psize_defs[psize];
		if (!(pg_sizes & def->h_rpt_pgsize))
			continue;

		nr_pages = (end - start) >> def->shift;
		flush_lpid = nr_pages > tlb_range_flush_page_ceiling;
		if (flush_lpid)
			return do_tlb_invalidate_nested_all(vcpu, lpid,
							RIC_FLUSH_TLB);
		addr = start;
		ap = mmu_get_ap(psize);
		page_size = 1UL << def->shift;
		do {
			ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap,
						   get_epn(addr));
			if (ret)
				return H_P4;
			addr += page_size;
		} while (addr < end);
	}
	return ret;
}

/*
 * Performs partition-scoped invalidations for nested guests
 * as part of H_RPT_INVALIDATE hcall.
 */
long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
			     unsigned long type, unsigned long pg_sizes,
			     unsigned long start, unsigned long end)
{
	/*
	 * If the L2 lpid isn't valid, we need to return H_PARAMETER.
	 *
	 * However, nested KVM issues an L2 lpid flush call when creating
	 * partition table entries for L2. This happens even before the
	 * corresponding shadow lpid is created in HV, which happens in the
	 * H_ENTER_NESTED call. Since we can't differentiate this case from
	 * the invalid case, we ignore such flush requests and return success.
	 */
	if (!__find_nested(vcpu->kvm, lpid))
		return H_SUCCESS;

	/*
	 * A flush all request can be handled by a full lpid flush only.
	 */
	if ((type & H_RPTI_TYPE_NESTED_ALL) == H_RPTI_TYPE_NESTED_ALL)
		return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_ALL);

	/*
	 * We don't need to handle a PWC flush like the process table case
	 * here, because the intermediate partition scoped table in the
	 * nested guest doesn't really have a PWC. The only level with a PWC
	 * is L0, and for a nested invalidate at L0 we always do
	 * kvmhv_flush_lpid(), which does radix__flush_all_lpid(). For a
	 * range invalidate at any level, we are not removing the higher
	 * level page tables and hence there is no PWC invalidate needed.
	 *
	 * if (type & H_RPTI_TYPE_PWC) {
	 *	ret = do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_PWC);
	 *	if (ret)
	 *		return H_P4;
	 * }
	 */

	if (start == 0 && end == -1)
		return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_TLB);

	if (type & H_RPTI_TYPE_TLB)
		return do_tlb_invalidate_nested_tlb(vcpu, lpid, pg_sizes,
						    start, end);
	return H_SUCCESS;
}

/* Used to convert a nested guest real address to an L1 guest real address */
static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
				       struct kvm_nested_guest *gp,
				       unsigned long n_gpa, unsigned long dsisr,
				       struct kvmppc_pte *gpte_p)
{
	u64 fault_addr, flags = dsisr & DSISR_ISSTORE;
	int ret;

	ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr,
					 &fault_addr);

	if (ret) {
		/* We didn't find a pte */
		if (ret == -EINVAL) {
			/* Unsupported mmu config */
			flags |= DSISR_UNSUPP_MMU;
		} else if (ret == -ENOENT) {
			/* No translation found */
			flags |= DSISR_NOHPTE;
		} else if (ret == -EFAULT) {
			/* Couldn't access L1 real address */
			flags |= DSISR_PRTABLE_FAULT;
			vcpu->arch.fault_gpa = fault_addr;
		} else {
			/* Unknown error */
			return ret;
		}
		goto forward_to_l1;
	} else {
		/* We found a pte -> check permissions */
		if (dsisr & DSISR_ISSTORE) {
			/* Can we write? */
			if (!gpte_p->may_write) {
				flags |= DSISR_PROTFAULT;
				goto forward_to_l1;
			}
		} else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
			/* Can we execute? */
			if (!gpte_p->may_execute) {
				flags |= SRR1_ISI_N_G_OR_CIP;
				goto forward_to_l1;
			}
		} else {
			/* Can we read? */
			if (!gpte_p->may_read && !gpte_p->may_write) {
				flags |= DSISR_PROTFAULT;
				goto forward_to_l1;
			}
		}
	}

	return 0;

forward_to_l1:
	vcpu->arch.fault_dsisr = flags;
	if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
		vcpu->arch.shregs.msr &= SRR1_MSR_BITS;
		vcpu->arch.shregs.msr |= flags;
	}
	return RESUME_HOST;
}

static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
				       struct kvm_nested_guest *gp,
				       unsigned long n_gpa,
				       struct kvmppc_pte gpte,
				       unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	u64 pgflags;
	long ret;

	/* Are the rc bits set in the L1 partition scoped pte? */
	pgflags = _PAGE_ACCESSED;
	if (writing)
		pgflags |= _PAGE_DIRTY;
	if (pgflags & ~gpte.rc)
		return RESUME_HOST;

	spin_lock(&kvm->mmu_lock);
	/* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
	ret = kvmppc_hv_handle_set_rc(kvm, false, writing,
				      gpte.raddr, kvm->arch.lpid);
	if (!ret) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Set the rc bit in the pte of the shadow_pgtable for the nested guest */
	ret = kvmppc_hv_handle_set_rc(kvm, true, writing,
				      n_gpa, gp->l1_lpid);
	if (!ret)
		ret = -EINVAL;
	else
		ret = 0;

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	return ret;
}

static inline int kvmppc_radix_level_to_shift(int level)
{
	switch (level) {
	case 2:
		return PUD_SHIFT;
	case 1:
		return PMD_SHIFT;
	default:
		return PAGE_SHIFT;
	}
}

static inline int kvmppc_radix_shift_to_level(int shift)
{
	if (shift == PUD_SHIFT)
		return 2;
	if (shift == PMD_SHIFT)
		return 1;
	if (shift == PAGE_SHIFT)
		return 0;
	WARN_ON_ONCE(1);
	return 0;
}

/* called with gp->tlb_lock held */
static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
					  struct kvm_nested_guest *gp)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_memory_slot *memslot;
	struct rmap_nested *n_rmap;
	struct kvmppc_pte gpte;
	pte_t pte, *pte_p;
	unsigned long mmu_seq;
	unsigned long dsisr = vcpu->arch.fault_dsisr;
	unsigned long ea = vcpu->arch.fault_dar;
	unsigned long *rmapp;
	unsigned long n_gpa, gpa, gfn, perm = 0UL;
	unsigned int shift, l1_shift, level;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	bool kvm_ro = false;
	long int ret;

	if (!gp->l1_gr_to_hr) {
		kvmhv_update_ptbl_cache(gp);
		if (!gp->l1_gr_to_hr)
			return RESUME_HOST;
	}

	/* Convert the nested guest real address into an L1 guest real address */

	n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		n_gpa |= ea & 0xFFF;
	ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);

	/*
	 * If the hardware found a translation but we don't now have a usable
	 * translation in the l1 partition-scoped tree, remove the shadow pte
	 * and let the guest retry.
	 */
	if (ret == RESUME_HOST &&
	    (dsisr & (DSISR_PROTFAULT | DSISR_BADACCESS | DSISR_NOEXEC_OR_G |
		      DSISR_BAD_COPYPASTE)))
		goto inval;
	if (ret)
		return ret;

	/* Failed to set the reference/change bits */
	if (dsisr & DSISR_SET_RC) {
		ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);
		if (ret == RESUME_HOST)
			return ret;
		if (ret)
			goto inval;
		dsisr &= ~DSISR_SET_RC;
		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT)))
			return RESUME_GUEST;
	}

	/*
	 * We took an HISI or HDSI while we were running a nested guest which
	 * means we have no partition scoped translation for that. This means
	 * we need to insert a pte for the mapping into our shadow_pgtable.
	 */

	l1_shift = gpte.page_shift;
	if (l1_shift < PAGE_SHIFT) {
		/* We don't support l1 using a page size smaller than our own */
		pr_err("KVM: L1 guest page shift (%d) less than our own (%d)\n",
			l1_shift, PAGE_SHIFT);
		return -EINVAL;
	}
	gpa = gpte.raddr;
	gfn = gpa >> PAGE_SHIFT;

	/* 1. Get the corresponding host memslot */

	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
			/* unusual error -> reflect to the guest as a DSI */
			kvmppc_core_queue_data_storage(vcpu,
					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
					ea, dsisr);
			return RESUME_GUEST;
		}

		/* passthrough of emulated MMIO case */
		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
	}
	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* Give the guest a DSI */
			kvmppc_core_queue_data_storage(vcpu,
					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
					ea, DSISR_ISSTORE | DSISR_PROTFAULT);
			return RESUME_GUEST;
		}
		kvm_ro = true;
	}

	/* 2. Find the host pte for this L1 guest real address */

	/* Used to check for invalidations in progress */
	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();

	/* See if we can find a translation in our partition scoped tables for L1 */
	pte = __pte(0);
	spin_lock(&kvm->mmu_lock);
	pte_p = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (!shift)
		shift = PAGE_SHIFT;
	if (pte_p)
		pte = *pte_p;
	spin_unlock(&kvm->mmu_lock);

	if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) {
		/* No suitable pte found -> try to insert a mapping */
		ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
					writing, kvm_ro, &pte, &level);
		if (ret == -EAGAIN)
			return RESUME_GUEST;
		else if (ret)
			return ret;
		shift = kvmppc_radix_level_to_shift(level);
	}
	/* Align gfn to the start of the page */
	gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT;

	/* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */

	/* The permissions are the combination of the host and l1 guest ptes */
	perm |= gpte.may_read ? 0UL : _PAGE_READ;
	perm |= gpte.may_write ? 0UL : _PAGE_WRITE;
	perm |= gpte.may_execute ? 0UL : _PAGE_EXEC;
	/* Only set accessed/dirty (rc) bits if set in host and l1 guest ptes */
	perm |= (gpte.rc & _PAGE_ACCESSED) ? 0UL : _PAGE_ACCESSED;
	perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY;
	pte = __pte(pte_val(pte) & ~perm);

	/* What size pte can we insert? */
	if (shift > l1_shift) {
		u64 mask;
		unsigned int actual_shift = PAGE_SHIFT;
		if (PMD_SHIFT < l1_shift)
			actual_shift = PMD_SHIFT;
		mask = (1UL << shift) - (1UL << actual_shift);
		pte = __pte(pte_val(pte) | (gpa & mask));
		shift = actual_shift;
	}
	level = kvmppc_radix_shift_to_level(shift);
	n_gpa &= ~((1UL << shift) - 1);

	/* 4. Insert the pte into our shadow_pgtable */

	n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL);
	if (!n_rmap)
		return RESUME_GUEST; /* Let the guest try again */
	n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) |
		(((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT);
	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
				mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
	kfree(n_rmap);
	if (ret == -EAGAIN)
		ret = RESUME_GUEST;	/* Let the guest try again */

	return ret;

 inval:
	kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL);
	return RESUME_GUEST;
}

long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *gp = vcpu->arch.nested;
	long int ret;

	mutex_lock(&gp->tlb_lock);
	ret = __kvmhv_nested_page_fault(vcpu, gp);
	mutex_unlock(&gp->tlb_lock);
	return ret;
}

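/*
 * Return the lowest lpid >= lpid + 1 that currently has a nested guest,
 * or -1 if there are none.
 */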
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
{
	int ret = lpid + 1;

	spin_lock(&kvm->mmu_lock);
	if (!idr_get_next(&kvm->arch.kvm_nested_guest_idr, &ret))
		ret = -1;
	spin_unlock(&kvm->mmu_lock);

	return ret;
}