// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#include "irq.h"
#include "ioapic.h"
#include "mmu.h"
#include "mmu_internal.h"
#include "tdp_mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "cpuid.h"
#include "spte.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/kern_levels.h>
#include <linux/kthread.h>

#include <asm/page.h>
#include <asm/memtype.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/vmx.h>
#include <asm/kvm_page_track.h>
#include "trace.h"

#include "paging.h"

extern bool itlb_multihit_kvm_mitigation;

static int __read_mostly nx_huge_pages = -1;
#ifdef CONFIG_PREEMPT_RT
/* Recovery can cause latency spikes, so disable it for PREEMPT_RT. */
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
#else
static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
#endif

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);

static const struct kernel_param_ops nx_huge_pages_ops = {
	.set = set_nx_huge_pages,
	.get = param_get_bool,
};

static const struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
	.set = set_nx_huge_pages_recovery_ratio,
	.get = param_get_uint,
};

module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
__MODULE_PARM_TYPE(nx_huge_pages, "bool");
module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
		&nx_huge_pages_recovery_ratio, 0644);
__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
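
/*
 * A hedged usage sketch, not an additional interface: with the 0644
 * permissions above, both parameters can also be flipped at runtime
 * through sysfs, assuming the standard module-param layout for kvm.ko:
 *
 *	echo N  > /sys/module/kvm/parameters/nx_huge_pages
 *	echo 60 > /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio
 */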

static bool __read_mostly force_flush_and_sync_on_reuse;
module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);

/*
 * When set to true, this variable enables Two-Dimensional Paging (TDP),
 * where the hardware walks 2 page tables:
 * 1. the guest-virtual to guest-physical translation
 * 2. while doing 1., the guest-physical to host-physical translation
 * If the hardware supports TDP, we don't need to do shadow paging.
 */
bool tdp_enabled = false;

static int max_huge_page_level __read_mostly;
static int max_tdp_level __read_mostly;

enum {
	AUDIT_PRE_PAGE_FAULT,
	AUDIT_POST_PAGE_FAULT,
	AUDIT_PRE_PTE_WRITE,
	AUDIT_POST_PTE_WRITE,
	AUDIT_PRE_SYNC,
	AUDIT_POST_SYNC
};

#ifdef MMU_DEBUG
bool dbg = 0;
module_param(dbg, bool, 0644);
#endif

#define PTE_PREFETCH_NUM		8

#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))

#include <trace/events/kvm.h>

/* Make pte_list_desc fit well in a cache line. */
#define PTE_LIST_EXT 3

struct pte_list_desc {
	u64 *sptes[PTE_LIST_EXT];
	struct pte_list_desc *more;
};

struct kvm_shadow_walk_iterator {
	u64 addr;
	hpa_t shadow_addr;
	u64 *sptep;
	int level;
	unsigned index;
};

#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
					 (_root), (_addr));                \
	     shadow_walk_okay(&(_walker));			           \
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry(_vcpu, _addr, _walker)            \
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
	     shadow_walk_okay(&(_walker)) &&				\
		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
	     __shadow_walk_next(&(_walker), spte))
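
/*
 * A hedged usage sketch for the walkers above (variable names are
 * illustrative only; the real callers appear further down in this file):
 *
 *	struct kvm_shadow_walk_iterator it;
 *	u64 spte;
 *
 *	for_each_shadow_entry_lockless(vcpu, addr, it, spte)
 *		if (!is_shadow_present_pte(spte))
 *			break;
 */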

static struct kmem_cache *pte_list_desc_cache;
struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;

static void mmu_spte_set(u64 *sptep, u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);

#define CREATE_TRACE_POINTS
#include "mmutrace.h"


static inline bool kvm_available_flush_tlb_with_range(void)
{
	return kvm_x86_ops.tlb_remote_flush_with_range;
}

static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
		struct kvm_tlb_range *range)
{
	int ret = -ENOTSUPP;

	if (range && kvm_x86_ops.tlb_remote_flush_with_range)
		ret = kvm_x86_ops.tlb_remote_flush_with_range(kvm, range);

	if (ret)
		kvm_flush_remote_tlbs(kvm);
}

void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
		u64 start_gfn, u64 pages)
{
	struct kvm_tlb_range range;

	range.start_gfn = start_gfn;
	range.pages = pages;

	kvm_flush_remote_tlbs_with_range(kvm, &range);
}

bool is_nx_huge_page_enabled(void)
{
	return READ_ONCE(nx_huge_pages);
}

static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
			   unsigned int access)
{
	u64 mask = make_mmio_spte(vcpu, gfn, access);

	trace_mark_mmio_spte(sptep, gfn, mask);
	mmu_spte_set(sptep, mask);
}

static gfn_t get_mmio_spte_gfn(u64 spte)
{
	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;

	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
	       & shadow_nonpresent_or_rsvd_mask;

	return gpa >> PAGE_SHIFT;
}

static unsigned get_mmio_spte_access(u64 spte)
{
	return spte & shadow_mmio_access_mask;
}

static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
			  kvm_pfn_t pfn, unsigned int access)
{
	if (unlikely(is_noslot_pfn(pfn))) {
		mark_mmio_spte(vcpu, sptep, gfn, access);
		return true;
	}

	return false;
}

static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
{
	u64 kvm_gen, spte_gen, gen;

	gen = kvm_vcpu_memslots(vcpu)->generation;
	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return false;

	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
	spte_gen = get_mmio_spte_generation(spte);

	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
	return likely(kvm_gen == spte_gen);
}

static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception)
{
	return gpa;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.efer & EFER_NX;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
	WRITE_ONCE(*sptep, spte);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	WRITE_ONCE(*sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	return xchg(sptep, spte);
}

static u64 __get_spte_lockless(u64 *sptep)
{
	return READ_ONCE(*sptep);
}
#else
union split_spte {
	struct {
		u32 spte_low;
		u32 spte_high;
	};
	u64 spte;
};

static void count_spte_clear(u64 *sptep, u64 spte)
{
	struct kvm_mmu_page *sp = sptep_to_sp(sptep);

	if (is_shadow_present_pte(spte))
		return;

	/* Ensure the spte is completely set before we increase the count */
	smp_wmb();
	sp->clear_spte_count++;
}

static void __set_spte(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	ssptep->spte_high = sspte.spte_high;

	/*
	 * If we map the spte from nonpresent to present, we should store
	 * the high bits first and only then set the present bit, so the
	 * CPU cannot fetch the spte while we are setting it.
	 */
	smp_wmb();

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);

	/*
	 * If we map the spte from present to nonpresent, we should clear the
	 * present bit first, so that a vCPU cannot fetch stale high bits.
	 */
	smp_wmb();

	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte, orig;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	/* xchg acts as a barrier before the setting of the high bits */
	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
	orig.spte_high = ssptep->spte_high;
	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);

	return orig.spte;
}

/*
 * The idea of using this lightweight way to get the spte on x86_32 is
 * taken from gup_get_pte (mm/gup.c).
 *
 * A TLB flush for the spte may be pending, because kvm_set_pte_rmapp
 * coalesces the flushes and we run outside of the MMU lock.  Therefore
 * we need to protect against in-progress updates of the spte.
 *
 * Reading the spte while an update is in progress may get the old value
 * for the high part of the spte.  The race is fine for a present->non-present
 * change (because the high part of the spte is ignored for non-present sptes),
 * but for a present->present change we must reread the spte.
 *
 * All such changes are done in two steps (present->non-present and
 * non-present->present), hence it is enough to count the number of
 * present->non-present updates: if it changed while reading the spte,
 * we might have hit the race.  This is done using clear_spte_count.
 */
static u64 __get_spte_lockless(u64 *sptep)
{
	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
	union split_spte spte, *orig = (union split_spte *)sptep;
	int count;

retry:
	count = sp->clear_spte_count;
	smp_rmb();

	spte.spte_low = orig->spte_low;
	smp_rmb();

	spte.spte_high = orig->spte_high;
	smp_rmb();

	if (unlikely(spte.spte_low != orig->spte_low ||
	      count != sp->clear_spte_count))
		goto retry;

	return spte.spte;
}
#endif

static bool spte_has_volatile_bits(u64 spte)
{
	if (!is_shadow_present_pte(spte))
		return false;

	/*
	 * Always atomically update an spte that can be updated outside of
	 * the mmu-lock: this ensures the dirty bit is not lost and gives
	 * us a stable is_writable_pte(), so that no TLB flush is missed.
	 */
	if (spte_can_locklessly_be_made_writable(spte) ||
	    is_access_track_spte(spte))
		return true;

	if (spte_ad_enabled(spte)) {
		if ((spte & shadow_accessed_mask) == 0 ||
		    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
			return true;
	}

	return false;
}

/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{
	WARN_ON(is_shadow_present_pte(*sptep));
	__set_spte(sptep, new_spte);
}

/*
 * Update the SPTE (excluding the PFN), but do not track changes in its
 * accessed/dirty status.
 */
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
{
	u64 old_spte = *sptep;

	WARN_ON(!is_shadow_present_pte(new_spte));

	if (!is_shadow_present_pte(old_spte)) {
		mmu_spte_set(sptep, new_spte);
		return old_spte;
	}

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, new_spte);
	else
		old_spte = __update_clear_spte_slow(sptep, new_spte);

	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));

	return old_spte;
}

/* Rules for using mmu_spte_update:
 * Update the state bits; the mapped pfn must not change.
 *
 * Whenever we overwrite a writable spte with a read-only one we
 * should flush remote TLBs. Otherwise rmap_write_protect
 * will find a read-only spte, even though the writable spte
 * might still be cached in a CPU's TLB; the return value indicates
 * this case.
 *
 * Returns true if the TLB needs to be flushed
 */
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
	bool flush = false;
	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);

	if (!is_shadow_present_pte(old_spte))
		return false;

	/*
	 * Updating the spte outside of the mmu-lock is safe, since
	 * we always update it atomically; see the comments in
	 * spte_has_volatile_bits().
	 */
	if (spte_can_locklessly_be_made_writable(old_spte) &&
	      !is_writable_pte(new_spte))
		flush = true;

	/*
	 * Flush TLB when accessed/dirty states are changed in the page tables,
	 * to guarantee consistency between TLB and page tables.
	 */

	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
		flush = true;
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
	}

	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
		flush = true;
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
	}

	return flush;
}

/*
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent and tracks the
 * state bits; it is used to clear the last-level sptep.
 * Returns non-zero if the PTE was previously valid.
 */
static int mmu_spte_clear_track_bits(u64 *sptep)
{
	kvm_pfn_t pfn;
	u64 old_spte = *sptep;

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, 0ull);
	else
		old_spte = __update_clear_spte_slow(sptep, 0ull);

	if (!is_shadow_present_pte(old_spte))
		return 0;

	pfn = spte_to_pfn(old_spte);

	/*
	 * KVM does not hold a refcount on the pages used by the KVM MMU;
	 * before such a page can be reclaimed, it must be unmapped from
	 * the MMU first.
	 */
	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));

	if (is_accessed_spte(old_spte))
		kvm_set_pfn_accessed(pfn);

	if (is_dirty_spte(old_spte))
		kvm_set_pfn_dirty(pfn);

	return 1;
}

/*
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear the spte without caring about its state bits;
 * it is used for upper-level sptes.
 */
static void mmu_spte_clear_no_track(u64 *sptep)
{
	__update_clear_spte_fast(sptep, 0ull);
}

static u64 mmu_spte_get_lockless(u64 *sptep)
{
	return __get_spte_lockless(sptep);
}

/* Restore an acc-track PTE back to a regular PTE */
static u64 restore_acc_track_spte(u64 spte)
{
	u64 new_spte = spte;
	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;

	WARN_ON_ONCE(spte_ad_enabled(spte));
	WARN_ON_ONCE(!is_access_track_spte(spte));

	new_spte &= ~shadow_acc_track_mask;
	new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
		      SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
	new_spte |= saved_bits;

	return new_spte;
}

/* Returns the Accessed status of the PTE and resets it at the same time. */
static bool mmu_spte_age(u64 *sptep)
{
	u64 spte = mmu_spte_get_lockless(sptep);

	if (!is_accessed_spte(spte))
		return false;

	if (spte_ad_enabled(spte)) {
		clear_bit((ffs(shadow_accessed_mask) - 1),
			  (unsigned long *)sptep);
	} else {
		/*
		 * Capture the dirty status of the page, so that it doesn't get
		 * lost when the SPTE is marked for access tracking.
		 */
		if (is_writable_pte(spte))
			kvm_set_pfn_dirty(spte_to_pfn(spte));

		spte = mark_spte_for_access_track(spte);
		mmu_spte_update_no_track(sptep, spte);
	}

	return true;
}

static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
	/*
	 * Prevent page table teardown by making any freer wait during the
	 * kvm_flush_remote_tlbs() IPI to all active vcpus.
	 */
	local_irq_disable();

	/*
	 * Make sure a following spte read is not reordered ahead of the write
	 * to vcpu->mode.
	 */
	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
}

static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
	/*
	 * Make sure the write to vcpu->mode is not reordered in front of
	 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
	 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
	 */
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
	local_irq_enable();
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
{
	int r;

	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
	if (r)
		return r;
	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
				       PT64_ROOT_MAX_LEVEL);
	if (r)
		return r;
	if (maybe_indirect) {
		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
					       PT64_ROOT_MAX_LEVEL);
		if (r)
			return r;
	}
	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
					  PT64_ROOT_MAX_LEVEL);
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
{
	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
}

static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
{
	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
}

static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
	if (!sp->role.direct)
		return sp->gfns[index];

	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}

static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
	if (!sp->role.direct) {
		sp->gfns[index] = gfn;
		return;
	}

	if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
		pr_err_ratelimited("gfn mismatch under direct page %llx "
				   "(expected %llx, got %llx)\n",
				   sp->gfn,
				   kvm_mmu_page_get_gfn(sp, index), gfn);
}

/*
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
 */
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
					      struct kvm_memory_slot *slot,
					      int level)
{
	unsigned long idx;

	idx = gfn_to_index(gfn, slot->base_gfn, level);
	return &slot->arch.lpage_info[level - 2][idx];
}

static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
					    gfn_t gfn, int count)
{
	struct kvm_lpage_info *linfo;
	int i;

	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
		linfo = lpage_info_slot(gfn, slot, i);
		linfo->disallow_lpage += count;
		WARN_ON(linfo->disallow_lpage < 0);
	}
}

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
{
	update_gfn_disallow_lpage_count(slot, gfn, 1);
}

void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
{
	update_gfn_disallow_lpage_count(slot, gfn, -1);
}

static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	kvm->arch.indirect_shadow_pages++;
	gfn = sp->gfn;
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);

	/* Non-leaf shadow pages are kept write-protected. */
	if (sp->role.level > PG_LEVEL_4K)
		return kvm_slot_page_track_add_page(kvm, slot, gfn,
						    KVM_PAGE_TRACK_WRITE);

	kvm_mmu_gfn_disallow_lpage(slot, gfn);
}

void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	if (sp->lpage_disallowed)
		return;

	++kvm->stat.nx_lpage_splits;
	list_add_tail(&sp->lpage_disallowed_link,
		      &kvm->arch.lpage_disallowed_mmu_pages);
	sp->lpage_disallowed = true;
}

static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	kvm->arch.indirect_shadow_pages--;
	gfn = sp->gfn;
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	if (sp->role.level > PG_LEVEL_4K)
		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
						       KVM_PAGE_TRACK_WRITE);

	kvm_mmu_gfn_allow_lpage(slot, gfn);
}

void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	--kvm->stat.nx_lpage_splits;
	sp->lpage_disallowed = false;
	list_del(&sp->lpage_disallowed_link);
}

static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return NULL;
	if (no_dirty_log && slot->dirty_bitmap)
		return NULL;

	return slot;
}

/*
 * About rmap_head encoding:
 *
 * If the bit zero of rmap_head->val is clear, then it points to the only spte
 * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
 * pte_list_desc containing more mappings.
 */
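
/*
 * A minimal sketch of decoding that layout (this mirrors what
 * rmap_get_first() and rmap_get_next() below do; the variable names are
 * illustrative only):
 *
 *	if (!rmap_head->val)
 *		return NULL;			// empty chain
 *	else if (!(rmap_head->val & 1))
 *		sptep = (u64 *)rmap_head->val;	// exactly one spte
 *	else
 *		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
 */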

/*
 * Returns the number of pointers in the rmap chain, not counting the new one.
 */
static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
			struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc;
	int i, count = 0;

	if (!rmap_head->val) {
		rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
		rmap_head->val = (unsigned long)spte;
	} else if (!(rmap_head->val & 1)) {
		rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_pte_list_desc(vcpu);
		desc->sptes[0] = (u64 *)rmap_head->val;
		desc->sptes[1] = spte;
		rmap_head->val = (unsigned long)desc | 1;
		++count;
	} else {
		rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
		while (desc->sptes[PTE_LIST_EXT-1]) {
			count += PTE_LIST_EXT;

			if (!desc->more) {
				desc->more = mmu_alloc_pte_list_desc(vcpu);
				desc = desc->more;
				break;
			}
			desc = desc->more;
		}
		for (i = 0; desc->sptes[i]; ++i)
			++count;
		desc->sptes[i] = spte;
	}
	return count;
}

static void
pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
			   struct pte_list_desc *desc, int i,
			   struct pte_list_desc *prev_desc)
{
	int j;

	for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
		;
	desc->sptes[i] = desc->sptes[j];
	desc->sptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		rmap_head->val = 0;
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			rmap_head->val = (unsigned long)desc->more | 1;
	mmu_free_pte_list_desc(desc);
}

static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc;
	struct pte_list_desc *prev_desc;
	int i;

	if (!rmap_head->val) {
		pr_err("%s: %p 0->BUG\n", __func__, spte);
		BUG();
	} else if (!(rmap_head->val & 1)) {
		rmap_printk("%s:  %p 1->0\n", __func__, spte);
		if ((u64 *)rmap_head->val != spte) {
			pr_err("%s:  %p 1->BUG\n", __func__, spte);
			BUG();
		}
		rmap_head->val = 0;
	} else {
		rmap_printk("%s:  %p many->many\n", __func__, spte);
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
				if (desc->sptes[i] == spte) {
					pte_list_desc_remove_entry(rmap_head,
							desc, i, prev_desc);
					return;
				}
			}
			prev_desc = desc;
			desc = desc->more;
		}
		pr_err("%s: %p many->many\n", __func__, spte);
		BUG();
	}
}

static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
{
	mmu_spte_clear_track_bits(sptep);
	__pte_list_remove(sptep, rmap_head);
}

static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
					   struct kvm_memory_slot *slot)
{
	unsigned long idx;

	idx = gfn_to_index(gfn, slot->base_gfn, level);
	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
}

static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
					 struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;

	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	return __gfn_to_rmap(gfn, sp->role.level, slot);
}

static bool rmap_can_add(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_memory_cache *mc;

	mc = &vcpu->arch.mmu_pte_list_desc_cache;
	return kvm_mmu_memory_cache_nr_free_objects(mc);
}

static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_head *rmap_head;

	sp = sptep_to_sp(spte);
	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
	return pte_list_add(vcpu, spte, rmap_head);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	struct kvm_rmap_head *rmap_head;

	sp = sptep_to_sp(spte);
	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
	rmap_head = gfn_to_rmap(kvm, gfn, sp);
	__pte_list_remove(spte, rmap_head);
}

/*
 * Used by the following functions to iterate through the sptes linked by an
 * rmap.  All fields are private and must not be used outside these helpers.
 */
struct rmap_iterator {
	/* private fields */
	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
	int pos;			/* index of the sptep */
};

/*
 * Iteration must be started by this function.  This should also be used after
 * removing/dropping sptes from the rmap link because in such cases the
 * information in the iterator may not be valid.
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
			   struct rmap_iterator *iter)
{
	u64 *sptep;

	if (!rmap_head->val)
		return NULL;

	if (!(rmap_head->val & 1)) {
		iter->desc = NULL;
		sptep = (u64 *)rmap_head->val;
		goto out;
	}

	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
	iter->pos = 0;
	sptep = iter->desc->sptes[iter->pos];
out:
	BUG_ON(!is_shadow_present_pte(*sptep));
	return sptep;
}

/*
 * Must be used with a valid iterator: e.g. after rmap_get_first().
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_next(struct rmap_iterator *iter)
{
	u64 *sptep;

	if (iter->desc) {
		if (iter->pos < PTE_LIST_EXT - 1) {
			++iter->pos;
			sptep = iter->desc->sptes[iter->pos];
			if (sptep)
				goto out;
		}

		iter->desc = iter->desc->more;

		if (iter->desc) {
			iter->pos = 0;
			/* desc->sptes[0] cannot be NULL */
			sptep = iter->desc->sptes[iter->pos];
			goto out;
		}
	}

	return NULL;
out:
	BUG_ON(!is_shadow_present_pte(*sptep));
	return sptep;
}

#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
	     _spte_; _spte_ = rmap_get_next(_iter_))
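
/*
 * Typical usage of the iterator above, as seen throughout this file
 * (a sketch only, not an additional helper):
 *
 *	u64 *sptep;
 *	struct rmap_iterator iter;
 *
 *	for_each_rmap_spte(rmap_head, &iter, sptep)
 *		flush |= spte_write_protect(sptep, pt_protect);
 */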

static void drop_spte(struct kvm *kvm, u64 *sptep)
{
	if (mmu_spte_clear_track_bits(sptep))
		rmap_remove(kvm, sptep);
}


static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
{
	if (is_large_pte(*sptep)) {
		WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
		drop_spte(kvm, sptep);
		--kvm->stat.lpages;
		return true;
	}

	return false;
}

static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
	if (__drop_large_spte(vcpu->kvm, sptep)) {
		struct kvm_mmu_page *sp = sptep_to_sp(sptep);

		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
			KVM_PAGES_PER_HPAGE(sp->role.level));
	}
}

/*
 * Write-protect the specified @sptep; @pt_protect indicates whether
 * spte write-protection is caused by protecting the shadow page table.
 *
 * Note: write protection differs between dirty logging and spte
 * protection:
 * - for dirty logging, the spte can be set to writable at any time if
 *   its dirty bitmap is properly set.
 * - for spte protection, the spte can be made writable only after
 *   unsync-ing the shadow page.
 *
 * Return true if the TLB needs to be flushed.
 */
static bool spte_write_protect(u64 *sptep, bool pt_protect)
{
	u64 spte = *sptep;

	if (!is_writable_pte(spte) &&
	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
		return false;

	rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);

	if (pt_protect)
		spte &= ~SPTE_MMU_WRITEABLE;
	spte = spte & ~PT_WRITABLE_MASK;

	return mmu_spte_update(sptep, spte);
}

static bool __rmap_write_protect(struct kvm *kvm,
				 struct kvm_rmap_head *rmap_head,
				 bool pt_protect)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		flush |= spte_write_protect(sptep, pt_protect);

	return flush;
}

static bool spte_clear_dirty(u64 *sptep)
{
	u64 spte = *sptep;

	rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);

	MMU_WARN_ON(!spte_ad_enabled(spte));
	spte &= ~shadow_dirty_mask;
	return mmu_spte_update(sptep, spte);
}

static bool spte_wrprot_for_clear_dirty(u64 *sptep)
{
	bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
					       (unsigned long *)sptep);
	if (was_writable && !spte_ad_enabled(*sptep))
		kvm_set_pfn_dirty(spte_to_pfn(*sptep));

	return was_writable;
}

/*
 * Gets the GFN ready for another round of dirty logging by clearing the
 *	- D bit on ad-enabled SPTEs, and
 *	- W bit on ad-disabled SPTEs.
 * Returns true iff any D or W bits were cleared.
 */
static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (spte_ad_need_write_protect(*sptep))
			flush |= spte_wrprot_for_clear_dirty(sptep);
		else
			flush |= spte_clear_dirty(sptep);

	return flush;
}

static bool spte_set_dirty(u64 *sptep)
{
	u64 spte = *sptep;

	rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);

	/*
	 * Similar to the !kvm_x86_ops.slot_disable_log_dirty case,
	 * do not bother adding back write access to pages marked
	 * SPTE_AD_WRPROT_ONLY_MASK.
	 */
	spte |= shadow_dirty_mask;

	return mmu_spte_update(sptep, spte);
}

static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (spte_ad_enabled(*sptep))
			flush |= spte_set_dirty(sptep);

	return flush;
}

/**
 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
 * @kvm: kvm instance
 * @slot: slot to protect
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should protect
 *
 * Used when we do not need to care about huge page mappings: e.g. during dirty
 * logging we do not have any such mappings.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     gfn_t gfn_offset, unsigned long mask)
{
	struct kvm_rmap_head *rmap_head;

	if (kvm->arch.tdp_mmu_enabled)
		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
				slot->base_gfn + gfn_offset, mask, true);
	while (mask) {
		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					  PG_LEVEL_4K, slot);
		__rmap_write_protect(kvm, rmap_head, false);

		/* clear the first set bit */
		mask &= mask - 1;
	}
}

/**
 * kvm_mmu_clear_dirty_pt_masked - clear the MMU D-bit for PT level pages, or
 * write protect the page if the D-bit isn't supported.
 * @kvm: kvm instance
 * @slot: slot to clear D-bit
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages' D-bits we should clear
 *
 * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
 */
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     gfn_t gfn_offset, unsigned long mask)
{
	struct kvm_rmap_head *rmap_head;

	if (kvm->arch.tdp_mmu_enabled)
		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
				slot->base_gfn + gfn_offset, mask, false);
	while (mask) {
		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					  PG_LEVEL_4K, slot);
		__rmap_clear_dirty(kvm, rmap_head);

		/* clear the first set bit */
		mask &= mask - 1;
	}
}
EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);

/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * PT level pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 *
 * Used when we do not need to care about huge page mappings: e.g. during dirty
 * logging we do not have any such mappings.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				gfn_t gfn_offset, unsigned long mask)
{
	if (kvm_x86_ops.enable_log_dirty_pt_masked)
		kvm_x86_ops.enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
				mask);
	else
		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}

bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn)
{
	struct kvm_rmap_head *rmap_head;
	int i;
	bool write_protected = false;

	for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
		rmap_head = __gfn_to_rmap(gfn, i, slot);
		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
	}

	if (kvm->arch.tdp_mmu_enabled)
		write_protected |=
			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);

	return write_protected;
}

static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
}

static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	while ((sptep = rmap_get_first(rmap_head, &iter))) {
		rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);

		pte_list_remove(rmap_head, sptep);
		flush = true;
	}

	return flush;
}

static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			   struct kvm_memory_slot *slot, gfn_t gfn, int level,
			   unsigned long data)
{
	return kvm_zap_rmapp(kvm, rmap_head);
}

static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			     struct kvm_memory_slot *slot, gfn_t gfn, int level,
			     unsigned long data)
{
	u64 *sptep;
	struct rmap_iterator iter;
	int need_flush = 0;
	u64 new_spte;
	pte_t *ptep = (pte_t *)data;
	kvm_pfn_t new_pfn;

	WARN_ON(pte_huge(*ptep));
	new_pfn = pte_pfn(*ptep);

restart:
	for_each_rmap_spte(rmap_head, &iter, sptep) {
		rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
			    sptep, *sptep, gfn, level);

		need_flush = 1;

		if (pte_write(*ptep)) {
			pte_list_remove(rmap_head, sptep);
			goto restart;
		} else {
			new_spte = kvm_mmu_changed_pte_notifier_make_spte(
					*sptep, new_pfn);

			mmu_spte_clear_track_bits(sptep);
			mmu_spte_set(sptep, new_spte);
		}
	}

	if (need_flush && kvm_available_flush_tlb_with_range()) {
		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
		return 0;
	}

	return need_flush;
}

struct slot_rmap_walk_iterator {
	/* input fields. */
	struct kvm_memory_slot *slot;
	gfn_t start_gfn;
	gfn_t end_gfn;
	int start_level;
	int end_level;

	/* output fields. */
	gfn_t gfn;
	struct kvm_rmap_head *rmap;
	int level;

	/* private field. */
	struct kvm_rmap_head *end_rmap;
};

static void
rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
{
	iterator->level = level;
	iterator->gfn = iterator->start_gfn;
	iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
	iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
					   iterator->slot);
}

static void
slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
		    struct kvm_memory_slot *slot, int start_level,
		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
{
	iterator->slot = slot;
	iterator->start_level = start_level;
	iterator->end_level = end_level;
	iterator->start_gfn = start_gfn;
	iterator->end_gfn = end_gfn;

	rmap_walk_init_level(iterator, iterator->start_level);
}

static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
{
	return !!iterator->rmap;
}

static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
{
	if (++iterator->rmap <= iterator->end_rmap) {
		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
		return;
	}

	if (++iterator->level > iterator->end_level) {
		iterator->rmap = NULL;
		return;
	}

	rmap_walk_init_level(iterator, iterator->level);
}

#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
	   _start_gfn, _end_gfn, _iter_)				\
	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
				 _end_level_, _start_gfn, _end_gfn);	\
	     slot_rmap_walk_okay(_iter_);				\
	     slot_rmap_walk_next(_iter_))

static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				unsigned long data,
				int (*handler)(struct kvm *kvm,
					       struct kvm_rmap_head *rmap_head,
					       struct kvm_memory_slot *slot,
					       gfn_t gfn,
					       int level,
					       unsigned long data))
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct slot_rmap_walk_iterator iterator;
	int ret = 0;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(memslot, slots) {
			unsigned long hva_start, hva_end;
			gfn_t gfn_start, gfn_end;

			hva_start = max(start, memslot->userspace_addr);
			hva_end = min(end, memslot->userspace_addr +
				      (memslot->npages << PAGE_SHIFT));
			if (hva_start >= hva_end)
				continue;
			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_start = hva_to_gfn_memslot(hva_start, memslot);
			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

			for_each_slot_rmap_range(memslot, PG_LEVEL_4K,
						 KVM_MAX_HUGEPAGE_LEVEL,
						 gfn_start, gfn_end - 1,
						 &iterator)
				ret |= handler(kvm, iterator.rmap, memslot,
					       iterator.gfn, iterator.level, data);
		}
	}

	return ret;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  unsigned long data,
			  int (*handler)(struct kvm *kvm,
					 struct kvm_rmap_head *rmap_head,
					 struct kvm_memory_slot *slot,
					 gfn_t gfn, int level,
					 unsigned long data))
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
			unsigned flags)
{
	int r;

	r = kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);

	if (kvm->arch.tdp_mmu_enabled)
		r |= kvm_tdp_mmu_zap_hva_range(kvm, start, end);

	return r;
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	int r;

	r = kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);

	if (kvm->arch.tdp_mmu_enabled)
		r |= kvm_tdp_mmu_set_spte_hva(kvm, hva, &pte);

	return r;
}

static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			 struct kvm_memory_slot *slot, gfn_t gfn, int level,
			 unsigned long data)
{
	u64 *sptep;
	struct rmap_iterator iter;
	int young = 0;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		young |= mmu_spte_age(sptep);

	trace_kvm_age_page(gfn, level, slot, young);
	return young;
}

static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			      struct kvm_memory_slot *slot, gfn_t gfn,
			      int level, unsigned long data)
{
	u64 *sptep;
	struct rmap_iterator iter;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (is_accessed_spte(*sptep))
			return 1;
	return 0;
}

#define RMAP_RECYCLE_THRESHOLD 1000

static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_rmap_head *rmap_head;
	struct kvm_mmu_page *sp;

	sp = sptep_to_sp(spte);

	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);

	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
			KVM_PAGES_PER_HPAGE(sp->role.level));
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	int young = false;

	young = kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
	if (kvm->arch.tdp_mmu_enabled)
		young |= kvm_tdp_mmu_age_hva_range(kvm, start, end);

	return young;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	int young = false;

	young = kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
	if (kvm->arch.tdp_mmu_enabled)
		young |= kvm_tdp_mmu_test_age_hva(kvm, hva);

	return young;
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (is_shadow_present_pte(*pos)) {
			printk(KERN_ERR "%s: %p %llx\n", __func__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

/*
 * This value is the sum of all of the kvm instances'
 * kvm->arch.n_used_mmu_pages values.  We need a global,
 * aggregate version in order to make the slab shrinker
 * faster.
 */
kvm_mod_used_mmu_pages(struct kvm * kvm,long nr)1618 static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
1619 {
1620 	kvm->arch.n_used_mmu_pages += nr;
1621 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1622 }
1623 
kvm_mmu_free_page(struct kvm_mmu_page * sp)1624 static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
1625 {
1626 	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
1627 	hlist_del(&sp->hash_link);
1628 	list_del(&sp->link);
1629 	free_page((unsigned long)sp->spt);
1630 	if (!sp->role.direct)
1631 		free_page((unsigned long)sp->gfns);
1632 	kmem_cache_free(mmu_page_header_cache, sp);
1633 }
1634 
kvm_page_table_hashfn(gfn_t gfn)1635 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1636 {
1637 	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1638 }
1639 
mmu_page_add_parent_pte(struct kvm_vcpu * vcpu,struct kvm_mmu_page * sp,u64 * parent_pte)1640 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1641 				    struct kvm_mmu_page *sp, u64 *parent_pte)
1642 {
1643 	if (!parent_pte)
1644 		return;
1645 
1646 	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
1647 }
1648 
mmu_page_remove_parent_pte(struct kvm_mmu_page * sp,u64 * parent_pte)1649 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1650 				       u64 *parent_pte)
1651 {
1652 	__pte_list_remove(parent_pte, &sp->parent_ptes);
1653 }
1654 
drop_parent_pte(struct kvm_mmu_page * sp,u64 * parent_pte)1655 static void drop_parent_pte(struct kvm_mmu_page *sp,
1656 			    u64 *parent_pte)
1657 {
1658 	mmu_page_remove_parent_pte(sp, parent_pte);
1659 	mmu_spte_clear_no_track(parent_pte);
1660 }
1661 
kvm_mmu_alloc_page(struct kvm_vcpu * vcpu,int direct)1662 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
1663 {
1664 	struct kvm_mmu_page *sp;
1665 
1666 	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
1667 	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
1668 	if (!direct)
1669 		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
1670 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1671 
1672 	/*
1673 	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
1674 	 * depends on valid pages being added to the head of the list.  See
1675 	 * comments in kvm_zap_obsolete_pages().
1676 	 */
1677 	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
1678 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1679 	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
1680 	return sp;
1681 }
1682 
1683 static void mark_unsync(u64 *spte);
kvm_mmu_mark_parents_unsync(struct kvm_mmu_page * sp)1684 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1685 {
1686 	u64 *sptep;
1687 	struct rmap_iterator iter;
1688 
1689 	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1690 		mark_unsync(sptep);
1691 	}
1692 }
1693 
static void mark_unsync(u64 *spte)
{
	struct kvm_mmu_page *sp;
	unsigned int index;

	sp = sptep_to_sp(spte);
	index = spte - sp->spt;
	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
		return;
	if (sp->unsync_children++)
		return;
	kvm_mmu_mark_parents_unsync(sp);
}

static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
			       struct kvm_mmu_page *sp)
{
	return 0;
}

#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
	struct mmu_page_and_offset {
		struct kvm_mmu_page *sp;
		unsigned int idx;
	} page[KVM_PAGE_ARRAY_NR];
	unsigned int nr;
};

static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
			 int idx)
{
	int i;

	if (sp->unsync)
		for (i = 0; i < pvec->nr; i++)
			if (pvec->page[i].sp == sp)
				return 0;

	pvec->page[pvec->nr].sp = sp;
	pvec->page[pvec->nr].idx = idx;
	pvec->nr++;
	return (pvec->nr == KVM_PAGE_ARRAY_NR);
}

static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
{
	--sp->unsync_children;
	WARN_ON((int)sp->unsync_children < 0);
	__clear_bit(idx, sp->unsync_child_bitmap);
}

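/*
 * Recursively walk the unsync_child_bitmap, adding unsync shadow pages to
 * @pvec.  Returns the number of unsync leaf pages found, or -ENOSPC if
 * @pvec filled up before the walk completed.
 */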
static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	int i, ret, nr_unsync_leaf = 0;

	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
		struct kvm_mmu_page *child;
		u64 ent = sp->spt[i];

		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
			clear_unsync_child_bit(sp, i);
			continue;
		}

		child = to_shadow_page(ent & PT64_BASE_ADDR_MASK);

		if (child->unsync_children) {
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;

			ret = __mmu_unsync_walk(child, pvec);
			if (!ret) {
				clear_unsync_child_bit(sp, i);
				continue;
			} else if (ret > 0) {
				nr_unsync_leaf += ret;
			} else
				return ret;
		} else if (child->unsync) {
			nr_unsync_leaf++;
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;
		} else
			clear_unsync_child_bit(sp, i);
	}

	return nr_unsync_leaf;
}

#define INVALID_INDEX (-1)

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	pvec->nr = 0;
	if (!sp->unsync_children)
		return 0;

	mmu_pages_add(pvec, sp, INVALID_INDEX);
	return __mmu_unsync_walk(sp, pvec);
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	WARN_ON(!sp->unsync);
	trace_kvm_mmu_sync_page(sp);
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list);

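/*
 * Iterate over the shadow pages in a hash bucket, skipping invalid and
 * obsolete pages.
 */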
#define for_each_valid_sp(_kvm, _sp, _list)				\
	hlist_for_each_entry(_sp, _list, hash_link)			\
		if (is_obsolete_sp((_kvm), (_sp))) {			\
		} else

#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
	for_each_valid_sp(_kvm, _sp,					\
	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else

static inline bool is_ept_sp(struct kvm_mmu_page *sp)
{
	return sp->role.cr0_wp && sp->role.smap_andnot_wp;
}

/* @sp->gfn should be write-protected at the call site */
static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    struct list_head *invalid_list)
{
	if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
	    vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
		return false;
	}

	return true;
}

static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
					struct list_head *invalid_list,
					bool remote_flush)
{
	if (!remote_flush && list_empty(invalid_list))
		return false;

	if (!list_empty(invalid_list))
		kvm_mmu_commit_zap_page(kvm, invalid_list);
	else
		kvm_flush_remote_tlbs(kvm);
	return true;
}

static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
				 struct list_head *invalid_list,
				 bool remote_flush, bool local_flush)
{
	if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
		return;

	if (local_flush)
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

#ifdef CONFIG_KVM_MMU_AUDIT
#include "mmu_audit.c"
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
static void mmu_audit_disable(void) { }
#endif

static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	return sp->role.invalid ||
	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}

static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			 struct list_head *invalid_list)
{
	kvm_unlink_unsync_page(vcpu->kvm, sp);
	return __kvm_sync_page(vcpu, sp, invalid_list);
}

/* @gfn should be write-protected at the call site */
static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
			   struct list_head *invalid_list)
{
	struct kvm_mmu_page *s;
	bool ret = false;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
		if (!s->unsync)
			continue;

		WARN_ON(s->role.level != PG_LEVEL_4K);
		ret |= kvm_sync_page(vcpu, s, invalid_list);
	}

	return ret;
}

struct mmu_page_path {
	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
	unsigned int idx[PT64_ROOT_MAX_LEVEL];
};

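/*
 * Iterate over the shadow pages collected by mmu_unsync_walk(),
 * maintaining in @parents the chain of parent pages for the current
 * entry so that mmu_pages_clear_parents() can clear the corresponding
 * unsync_child_bitmap bits.
 */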
#define for_each_sp(pvec, sp, parents, i)			\
		for (i = mmu_pages_first(&pvec, &parents);	\
			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
			i = mmu_pages_next(&pvec, &parents, i))

static int mmu_pages_next(struct kvm_mmu_pages *pvec,
			  struct mmu_page_path *parents,
			  int i)
{
	int n;

	for (n = i+1; n < pvec->nr; n++) {
		struct kvm_mmu_page *sp = pvec->page[n].sp;
		unsigned idx = pvec->page[n].idx;
		int level = sp->role.level;

		parents->idx[level-1] = idx;
		if (level == PG_LEVEL_4K)
			break;

		parents->parent[level-2] = sp;
	}

	return n;
}

static int mmu_pages_first(struct kvm_mmu_pages *pvec,
			   struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	int level;

	if (pvec->nr == 0)
		return 0;

	WARN_ON(pvec->page[0].idx != INVALID_INDEX);

	sp = pvec->page[0].sp;
	level = sp->role.level;
	WARN_ON(level == PG_LEVEL_4K);

	parents->parent[level-2] = sp;

	/* Also set up a sentinel.  Further entries in pvec are all
	 * children of sp, so this element is never overwritten.
	 */
	parents->parent[level-1] = NULL;
	return mmu_pages_next(pvec, parents, 0);
}

static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	unsigned int level = 0;

	do {
		unsigned int idx = parents->idx[level];
		sp = parents->parent[level];
		if (!sp)
			return;

		WARN_ON(idx == INVALID_INDEX);
		clear_unsync_child_bit(sp, idx);
		level++;
	} while (!sp->unsync_children);
}

static void mmu_sync_children(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *parent)
{
	int i;
	struct kvm_mmu_page *sp;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
	LIST_HEAD(invalid_list);
	bool flush = false;

	while (mmu_unsync_walk(parent, &pages)) {
		bool protected = false;

		for_each_sp(pages, sp, parents, i)
			protected |= rmap_write_protect(vcpu, sp->gfn);

		if (protected) {
			kvm_flush_remote_tlbs(vcpu->kvm);
			flush = false;
		}

		for_each_sp(pages, sp, parents, i) {
			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
			mmu_pages_clear_parents(&parents);
		}
		if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {
			kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
			cond_resched_lock(&vcpu->kvm->mmu_lock);
			flush = false;
		}
	}

	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
}

static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
{
	atomic_set(&sp->write_flooding_count, 0);
}

static void clear_sp_write_flooding_count(u64 *spte)
{
	__clear_sp_write_flooding_count(sptep_to_sp(spte));
}

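/*
 * Look up an existing shadow page for the given gfn and role, syncing it
 * if necessary, or allocate and account a new one on a cache miss.
 */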
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int direct,
					     unsigned int access)
{
	bool direct_mmu = vcpu->arch.mmu->direct_map;
	union kvm_mmu_page_role role;
	struct hlist_head *sp_list;
	unsigned quadrant;
	struct kvm_mmu_page *sp;
	bool need_sync = false;
	bool flush = false;
	int collisions = 0;
	LIST_HEAD(invalid_list);

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = direct;
	if (role.direct)
		role.gpte_is_8_bytes = true;
	role.access = access;
	if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}

	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
	for_each_valid_sp(vcpu->kvm, sp, sp_list) {
		if (sp->gfn != gfn) {
			collisions++;
			continue;
		}

		if (!need_sync && sp->unsync)
			need_sync = true;

		if (sp->role.word != role.word)
			continue;

		if (direct_mmu)
			goto trace_get_page;

		if (sp->unsync) {
			/* The page is good, but __kvm_sync_page might still end
			 * up zapping it.  If so, break in order to rebuild it.
			 */
			if (!__kvm_sync_page(vcpu, sp, &invalid_list))
				break;

			WARN_ON(!list_empty(&invalid_list));
			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
		}

		if (sp->unsync_children)
			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);

		__clear_sp_write_flooding_count(sp);

trace_get_page:
		trace_kvm_mmu_get_page(sp, false);
		goto out;
	}

	++vcpu->kvm->stat.mmu_cache_miss;

	sp = kvm_mmu_alloc_page(vcpu, direct);

	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, sp_list);
	if (!direct) {
		/*
		 * We should write-protect the gfn before syncing pages;
		 * otherwise the content of the synced shadow page may
		 * be inconsistent with the guest page table.
		 */
		account_shadowed(vcpu->kvm, sp);
		if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);

		if (level > PG_LEVEL_4K && need_sync)
			flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
	}
	trace_kvm_mmu_get_page(sp, true);

	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
out:
	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
	return sp;
}

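/*
 * Initialize a walk of the shadow page tables for @addr, starting at the
 * given root.  For a PAE root, the first level is resolved here via
 * pae_root[] so that the walk operates on 64-bit entries below it.
 */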
static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
					struct kvm_vcpu *vcpu, hpa_t root,
					u64 addr)
{
	iterator->addr = addr;
	iterator->shadow_addr = root;
	iterator->level = vcpu->arch.mmu->shadow_root_level;

	if (iterator->level == PT64_ROOT_4LEVEL &&
	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
	    !vcpu->arch.mmu->direct_map)
		--iterator->level;

	if (iterator->level == PT32E_ROOT_LEVEL) {
		/*
		 * prev_root is currently only used for 64-bit hosts. So only
		 * the active root_hpa is valid here.
		 */
		BUG_ON(root != vcpu->arch.mmu->root_hpa);

		iterator->shadow_addr
			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		--iterator->level;
		if (!iterator->shadow_addr)
			iterator->level = 0;
	}
}

static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
{
	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
				    addr);
}

static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
	if (iterator->level < PG_LEVEL_4K)
		return false;

	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
	iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
	return true;
}

static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
			       u64 spte)
{
	if (is_last_spte(spte, iterator->level)) {
		iterator->level = 0;
		return;
	}

	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
	--iterator->level;
}

static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
	__shadow_walk_next(iterator, *iterator->sptep);
}

static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
			     struct kvm_mmu_page *sp)
{
	u64 spte;

	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);

	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));

	mmu_spte_set(sptep, spte);

	mmu_page_add_parent_pte(vcpu, sp, sptep);

	if (sp->unsync_children || sp->unsync)
		mark_unsync(sptep);
}

static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
				   unsigned direct_access)
{
	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
		struct kvm_mmu_page *child;

		/*
		 * For a direct sp, if the guest pte's dirty bit
		 * changed from clean to dirty, it would corrupt the
		 * sp's access, i.e. allow writes through a read-only
		 * sp.  Update the spte at this point to get a new sp
		 * with the correct access.
		 */
		child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK);
		if (child->role.access == direct_access)
			return;

		drop_parent_pte(child, sptep);
		kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
	}
}

/* Returns the number of zapped non-leaf child shadow pages. */
static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
			    u64 *spte, struct list_head *invalid_list)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (is_last_spte(pte, sp->role.level)) {
			drop_spte(kvm, spte);
			if (is_large_pte(pte))
				--kvm->stat.lpages;
		} else {
			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
			drop_parent_pte(child, spte);

			/*
			 * Recursively zap nested TDP SPs, parentless SPs are
			 * unlikely to be used again in the near future.  This
			 * avoids retaining a large number of stale nested SPs.
			 */
			if (tdp_enabled && invalid_list &&
			    child->role.guest_mode && !child->parent_ptes.val)
				return kvm_mmu_prepare_zap_page(kvm, child,
								invalid_list);
		}
	} else if (is_mmio_spte(pte)) {
		mmu_spte_clear_no_track(spte);
	}
	return 0;
}

static int kvm_mmu_page_unlink_children(struct kvm *kvm,
					struct kvm_mmu_page *sp,
					struct list_head *invalid_list)
{
	int zapped = 0;
	unsigned i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);

	return zapped;
}

static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *sptep;
	struct rmap_iterator iter;

	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
		drop_parent_pte(sp, sptep);
}

static int mmu_zap_unsync_children(struct kvm *kvm,
				   struct kvm_mmu_page *parent,
				   struct list_head *invalid_list)
{
	int i, zapped = 0;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;

	if (parent->role.level == PG_LEVEL_4K)
		return 0;

	while (mmu_unsync_walk(parent, &pages)) {
		struct kvm_mmu_page *sp;

		for_each_sp(pages, sp, parents, i) {
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
			mmu_pages_clear_parents(&parents);
			zapped++;
		}
	}

	return zapped;
}

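/*
 * Disconnect @sp from the page tables and move it to @invalid_list; the
 * page itself is freed later by kvm_mmu_commit_zap_page(), after TLBs
 * have been flushed.  Returns true if zapping children made
 * active_mmu_pages unstable, i.e. the caller must restart its iteration.
 */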
static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
				       struct kvm_mmu_page *sp,
				       struct list_head *invalid_list,
				       int *nr_zapped)
{
	bool list_unstable;

	trace_kvm_mmu_prepare_zap_page(sp);
	++kvm->stat.mmu_shadow_zapped;
	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
	kvm_mmu_unlink_parents(kvm, sp);

	/* Zapping children means active_mmu_pages has become unstable. */
	list_unstable = *nr_zapped;

	if (!sp->role.invalid && !sp->role.direct)
		unaccount_shadowed(kvm, sp);

	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
	if (!sp->root_count) {
		/* Count self */
		(*nr_zapped)++;

		/*
		 * Already invalid pages (previously active roots) are not on
		 * the active page list.  See list_del() in the "else" case of
		 * !sp->root_count.
		 */
		if (sp->role.invalid)
			list_add(&sp->link, invalid_list);
		else
			list_move(&sp->link, invalid_list);
		kvm_mod_used_mmu_pages(kvm, -1);
	} else {
		/*
		 * Remove the active root from the active page list, the root
		 * will be explicitly freed when the root_count hits zero.
		 */
		list_del(&sp->link);

		/*
		 * Obsolete pages cannot be used on any vCPUs, see the comment
		 * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
		 * treats invalid shadow pages as being obsolete.
		 */
		if (!is_obsolete_sp(kvm, sp))
			kvm_reload_remote_mmus(kvm);
	}

	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	sp->role.invalid = 1;
	return list_unstable;
}

static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list)
{
	int nr_zapped;

	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
	return nr_zapped;
}

static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp, *nsp;

	if (list_empty(invalid_list))
		return;

	/*
	 * We need to make sure everyone sees our modifications to
	 * the page tables and sees changes to vcpu->mode here. The barrier
	 * in kvm_flush_remote_tlbs() achieves this. This pairs
	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
	 *
	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
	 * guest mode and/or lockless shadow page table walks.
	 */
	kvm_flush_remote_tlbs(kvm);

	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
		WARN_ON(!sp->role.invalid || sp->root_count);
		kvm_mmu_free_page(sp);
	}
}

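/*
 * Zap up to @nr_to_zap of the oldest non-root shadow pages, walking
 * active_mmu_pages from the tail (oldest entries first).  Returns the
 * number of pages actually zapped.
 */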
static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
						  unsigned long nr_to_zap)
{
	unsigned long total_zapped = 0;
	struct kvm_mmu_page *sp, *tmp;
	LIST_HEAD(invalid_list);
	bool unstable;
	int nr_zapped;

	if (list_empty(&kvm->arch.active_mmu_pages))
		return 0;

restart:
	list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
		/*
		 * Don't zap active root pages, the page itself can't be freed
		 * and zapping it will just force vCPUs to realloc and reload.
		 */
		if (sp->root_count)
			continue;

		unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
						      &nr_zapped);
		total_zapped += nr_zapped;
		if (total_zapped >= nr_to_zap)
			break;

		if (unstable)
			goto restart;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);

	kvm->stat.mmu_recycled += total_zapped;
	return total_zapped;
}

static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}

static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
{
	unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);

	if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
		return 0;

	kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);

	if (!kvm_mmu_available_pages(vcpu->kvm))
		return -ENOSPC;
	return 0;
}

/*
 * Change the number of mmu pages allocated to the vm.
 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
{
	spin_lock(&kvm->mmu_lock);

	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
						  goal_nr_mmu_pages);

		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
	}

	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;

	spin_unlock(&kvm->mmu_lock);
}

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);
	int r;

	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
	r = 0;
	spin_lock(&kvm->mmu_lock);
	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
			 sp->role.word);
		r = 1;
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	}
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);

	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);

static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	trace_kvm_mmu_unsync_page(sp);
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;

	kvm_mmu_mark_parents_unsync(sp);
}

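/*
 * Decide whether a gfn that is about to get a writable spte must be
 * write-protected instead.  Returns true if write protection is required,
 * e.g. because the gfn is write-tracked or unsync-ing is not allowed;
 * otherwise the relevant shadow pages are marked unsync and false is
 * returned.
 */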
bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool can_unsync)
{
	struct kvm_mmu_page *sp;

	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
		return true;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
		if (!can_unsync)
			return true;

		if (sp->unsync)
			continue;

		WARN_ON(sp->role.level != PG_LEVEL_4K);
		kvm_unsync_page(vcpu, sp);
	}

	/*
	 * We need to ensure that the marking of unsync pages is visible
	 * before the SPTE is updated to allow writes because
	 * kvm_mmu_sync_roots() checks the unsync flags without holding
	 * the MMU lock and so can race with this. If the SPTE was updated
	 * before the page had been marked as unsync-ed, something like the
	 * following could happen:
	 *
	 * CPU 1                    CPU 2
	 * ---------------------------------------------------------------------
	 * 1.2 Host updates SPTE
	 *     to be writable
	 *                      2.1 Guest writes a GPTE for GVA X.
	 *                          (GPTE being in the guest page table shadowed
	 *                           by the SP from CPU 1.)
	 *                          This reads SPTE during the page table walk.
	 *                          Since SPTE.W is read as 1, there is no
	 *                          fault.
	 *
	 *                      2.2 Guest issues TLB flush.
	 *                          That causes a VM Exit.
	 *
	 *                      2.3 kvm_mmu_sync_pages() reads sp->unsync.
	 *                          Since it is false, it just returns.
	 *
	 *                      2.4 Guest accesses GVA X.
	 *                          Since the mapping in the SP was not updated,
	 *                          the old mapping for GVA X is incorrectly
	 *                          used.
	 * 1.1 Host marks SP
	 *     as unsync
	 *     (sp->unsync = true)
	 *
	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
	 * the situation in 2.4 does not arise. The implicit barrier in 2.2
	 * pairs with this write barrier.
	 */
	smp_wmb();

	return false;
}

static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
		    unsigned int pte_access, int level,
		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
		    bool can_unsync, bool host_writable)
{
	u64 spte;
	struct kvm_mmu_page *sp;
	int ret;

	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
		return 0;

	sp = sptep_to_sp(sptep);

	ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
			can_unsync, host_writable, sp_ad_disabled(sp), &spte);

	if (spte & PT_WRITABLE_MASK)
		kvm_vcpu_mark_page_dirty(vcpu, gfn);

	if (*sptep == spte)
		ret |= SET_SPTE_SPURIOUS;
	else if (mmu_spte_update(sptep, spte))
		ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
	return ret;
}

static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
			unsigned int pte_access, bool write_fault, int level,
			gfn_t gfn, kvm_pfn_t pfn, bool speculative,
			bool host_writable)
{
	int was_rmapped = 0;
	int rmap_count;
	int set_spte_ret;
	int ret = RET_PF_FIXED;
	bool flush = false;

	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
		 *sptep, write_fault, gfn);

	if (is_shadow_present_pte(*sptep)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
			struct kvm_mmu_page *child;
			u64 pte = *sptep;

			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
			drop_parent_pte(child, sptep);
			flush = true;
		} else if (pfn != spte_to_pfn(*sptep)) {
			pgprintk("hfn old %llx new %llx\n",
				 spte_to_pfn(*sptep), pfn);
			drop_spte(vcpu->kvm, sptep);
			flush = true;
		} else
			was_rmapped = 1;
	}

	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
				speculative, true, host_writable);
	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
		if (write_fault)
			ret = RET_PF_EMULATE;
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	}

	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
				KVM_PAGES_PER_HPAGE(level));

	if (unlikely(is_mmio_spte(*sptep)))
		ret = RET_PF_EMULATE;

	/*
	 * The fault is fully spurious if and only if the new SPTE and old SPTE
	 * are identical, and emulation is not required.
	 */
	if ((set_spte_ret & SET_SPTE_SPURIOUS) && ret == RET_PF_FIXED) {
		WARN_ON_ONCE(!was_rmapped);
		return RET_PF_SPURIOUS;
	}

	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
	trace_kvm_mmu_set_spte(level, gfn, sptep);
	if (!was_rmapped && is_large_pte(*sptep))
		++vcpu->kvm->stat.lpages;

	if (is_shadow_present_pte(*sptep)) {
		if (!was_rmapped) {
			rmap_count = rmap_add(vcpu, sptep, gfn);
			if (rmap_count > RMAP_RECYCLE_THRESHOLD)
				rmap_recycle(vcpu, sptep, gfn);
		}
	}

	return ret;
}

static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
				     bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
	if (!slot)
		return KVM_PFN_ERR_FAULT;

	return gfn_to_pfn_memslot_atomic(slot, gfn);
}

static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp,
				    u64 *start, u64 *end)
{
	struct page *pages[PTE_PREFETCH_NUM];
	struct kvm_memory_slot *slot;
	unsigned int access = sp->role.access;
	int i, ret;
	gfn_t gfn;

	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
	if (!slot)
		return -1;

	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
	if (ret <= 0)
		return -1;

	for (i = 0; i < ret; i++, gfn++, start++) {
		mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn,
			     page_to_pfn(pages[i]), true, true);
		put_page(pages[i]);
	}

	return 0;
}

static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *sptep)
{
	u64 *spte, *start = NULL;
	int i;

	WARN_ON(!sp->role.direct);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (is_shadow_present_pte(*spte) || spte == sptep) {
			if (!start)
				continue;
			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
				break;
			start = NULL;
		} else if (!start)
			start = spte;
	}
}

static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
{
	struct kvm_mmu_page *sp;

	sp = sptep_to_sp(sptep);

	/*
	 * Without accessed bits, there's no way to distinguish between
	 * actually accessed translations and prefetched ones, so disable
	 * pte prefetch if accessed bits aren't available.
	 */
	if (sp_ad_disabled(sp))
		return;

	if (sp->role.level > PG_LEVEL_4K)
		return;

	__direct_pte_prefetch(vcpu, sp, sptep);
}

static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn,
				  kvm_pfn_t pfn, struct kvm_memory_slot *slot)
{
	unsigned long hva;
	pte_t *pte;
	int level;

	if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
		return PG_LEVEL_4K;

	/*
	 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
	 * is not solely for performance, it's also necessary to avoid the
	 * "writable" check in __gfn_to_hva_many(), which will always fail on
	 * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
	 * page fault steps have already verified the guest isn't writing a
	 * read-only memslot.
	 */
	hva = __gfn_to_hva_memslot(slot, gfn);

	pte = lookup_address_in_mm(vcpu->kvm->mm, hva, &level);
	if (unlikely(!pte))
		return PG_LEVEL_4K;

	return level;
}

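/*
 * Compute the largest page level that can be used to map @gfn, capped by
 * @max_level, the memslot's disallow_lpage counts and the host mapping
 * level, and align *pfnp to the resulting level.
 */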
int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
			    int max_level, kvm_pfn_t *pfnp,
			    bool huge_page_disallowed, int *req_level)
{
	struct kvm_memory_slot *slot;
	struct kvm_lpage_info *linfo;
	kvm_pfn_t pfn = *pfnp;
	kvm_pfn_t mask;
	int level;

	*req_level = PG_LEVEL_4K;

	if (unlikely(max_level == PG_LEVEL_4K))
		return PG_LEVEL_4K;

	if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn))
		return PG_LEVEL_4K;

	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true);
	if (!slot)
		return PG_LEVEL_4K;

	max_level = min(max_level, max_huge_page_level);
	for ( ; max_level > PG_LEVEL_4K; max_level--) {
		linfo = lpage_info_slot(gfn, slot, max_level);
		if (!linfo->disallow_lpage)
			break;
	}

	if (max_level == PG_LEVEL_4K)
		return PG_LEVEL_4K;

	level = host_pfn_mapping_level(vcpu, gfn, pfn, slot);
	if (level == PG_LEVEL_4K)
		return level;

	*req_level = level = min(level, max_level);

	/*
	 * Enforce the iTLB multihit workaround after capturing the requested
	 * level, which will be used to do precise, accurate accounting.
	 */
	if (huge_page_disallowed)
		return PG_LEVEL_4K;

	/*
	 * mmu_notifier_retry() was successful and mmu_lock is held, so
	 * the pmd can't be split from under us.
	 */
	mask = KVM_PAGES_PER_HPAGE(level) - 1;
	VM_BUG_ON((gfn & mask) != (pfn & mask));
	*pfnp = pfn & ~mask;

	return level;
}

void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
				kvm_pfn_t *pfnp, int *goal_levelp)
{
	int level = *goal_levelp;

	if (cur_level == level && level > PG_LEVEL_4K &&
	    is_shadow_present_pte(spte) &&
	    !is_large_pte(spte)) {
		/*
		 * A small SPTE exists for this pfn, but FNAME(fetch)
		 * and __direct_map would like to create a large PTE
		 * instead: just force them to go down another level,
		 * patching the next 9 bits of the gfn back into the pfn.
		 */
		u64 page_mask = KVM_PAGES_PER_HPAGE(level) -
				KVM_PAGES_PER_HPAGE(level - 1);
		*pfnp |= gfn & page_mask;
		(*goal_levelp)--;
	}
}

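/*
 * Walk the shadow page tables for @gpa, allocating non-leaf shadow pages
 * as needed, and install the final spte at the chosen mapping level.
 */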
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
			int map_writable, int max_level, kvm_pfn_t pfn,
			bool prefault, bool is_tdp)
{
	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
	bool write = error_code & PFERR_WRITE_MASK;
	bool exec = error_code & PFERR_FETCH_MASK;
	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
	struct kvm_shadow_walk_iterator it;
	struct kvm_mmu_page *sp;
	int level, req_level, ret;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	gfn_t base_gfn = gfn;

	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
		return RET_PF_RETRY;

	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
					huge_page_disallowed, &req_level);

	trace_kvm_mmu_spte_requested(gpa, level, pfn);
	for_each_shadow_entry(vcpu, gpa, it) {
		/*
		 * We cannot overwrite existing page tables with an NX
		 * large page, as the leaf could be executable.
		 */
		if (nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(*it.sptep, gfn, it.level,
						   &pfn, &level);

		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		if (it.level == level)
			break;

		drop_large_spte(vcpu, it.sptep);
		if (!is_shadow_present_pte(*it.sptep)) {
			sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
					      it.level - 1, true, ACC_ALL);

			link_shadow_page(vcpu, it.sptep, sp);
			if (is_tdp && huge_page_disallowed &&
			    req_level >= it.level)
				account_huge_nx_page(vcpu->kvm, sp);
		}
	}

	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
			   write, level, base_gfn, pfn, prefault,
			   map_writable);
	if (ret == RET_PF_SPURIOUS)
		return ret;

	direct_pte_prefetch(vcpu, it.sptep);
	++vcpu->stat.pf_fixed;
	return ret;
}

static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
{
	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
}

static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
{
	/*
	 * Do not cache the mmio info caused by writing the readonly gfn
	 * into the spte; otherwise a read access on the readonly gfn would
	 * also cause an mmio page fault and be treated as mmio access.
	 */
	if (pfn == KVM_PFN_ERR_RO_FAULT)
		return RET_PF_EMULATE;

	if (pfn == KVM_PFN_ERR_HWPOISON) {
		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
		return RET_PF_RETRY;
	}

	return -EFAULT;
}

static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
				kvm_pfn_t pfn, unsigned int access,
				int *ret_val)
{
	/* The pfn is invalid, report the error! */
	if (unlikely(is_error_pfn(pfn))) {
		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
		return true;
	}

	if (unlikely(is_noslot_pfn(pfn)))
		vcpu_cache_mmio_info(vcpu, gva, gfn,
				     access & shadow_mmio_access_mask);

	return false;
}

static bool page_fault_can_be_fast(u32 error_code)
{
	/*
	 * Do not fix an mmio spte with an invalid generation number; it
	 * needs to be updated by the slow page fault path.
	 */
	if (unlikely(error_code & PFERR_RSVD_MASK))
		return false;

	/* See if the page fault is due to an NX violation */
	if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
		      == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
		return false;

	/*
	 * #PF can be fast if:
	 * 1. The shadow page table entry is not present, which could mean that
	 *    the fault is potentially caused by access tracking (if enabled).
	 * 2. The shadow page table entry is present and the fault
	 *    is caused by write-protect; we just need to change the W
	 *    bit of the spte, which can be done outside of mmu-lock.
	 *
	 * However, if access tracking is disabled we know that a non-present
	 * page must be a genuine page fault where we have to create a new SPTE.
	 * So, if access tracking is disabled, we return true only for write
	 * accesses to a present page.
	 */

	return shadow_acc_track_mask != 0 ||
	       ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
		== (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
}

/*
 * Returns true if the SPTE was fixed successfully. Otherwise,
 * someone else modified the SPTE from its original value.
 */
static bool
fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			u64 *sptep, u64 old_spte, u64 new_spte)
{
	gfn_t gfn;

	WARN_ON(!sp->role.direct);

	/*
	 * Theoretically we could also set the dirty bit (and flush the TLB)
	 * here in order to eliminate unnecessary PML logging. See comments
	 * in set_spte. But fast_page_fault is very unlikely to happen with
	 * PML enabled, so we do not do this. This might result in the same
	 * GPA being logged in the PML buffer again when the write really
	 * happens, and mark_page_dirty eventually being called twice, but
	 * that does no harm. It also avoids the TLB flush needed after
	 * setting the dirty bit, so non-PML cases won't be impacted.
	 *
	 * Compare with set_spte where instead shadow_dirty_mask is set.
	 */
	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
		return false;

	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
		/*
		 * The gfn of a direct spte is stable since it is
		 * computed from sp->gfn.
		 */
		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
	}

	return true;
}

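/*
 * Check whether the access indicated by @fault_err_code is already
 * permitted by @spte, i.e. whether the fault is spurious.
 */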
static bool is_access_allowed(u32 fault_err_code, u64 spte)
{
	if (fault_err_code & PFERR_FETCH_MASK)
		return is_executable_pte(spte);

	if (fault_err_code & PFERR_WRITE_MASK)
		return is_writable_pte(spte);

	/* Fault was on Read access */
	return spte & PT_PRESENT_MASK;
}

/*
 * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
 */
static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			   u32 error_code)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int ret = RET_PF_INVALID;
	u64 spte = 0ull;
	uint retry_count = 0;

	if (!page_fault_can_be_fast(error_code))
		return ret;

	walk_shadow_page_lockless_begin(vcpu);

	do {
		u64 new_spte;

		for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
			if (!is_shadow_present_pte(spte))
				break;

		sp = sptep_to_sp(iterator.sptep);
		if (!is_last_spte(spte, sp->role.level))
			break;

		/*
		 * Check whether the memory access that caused the fault would
		 * still cause it if it were to be performed right now. If not,
		 * then this is a spurious fault caused by a lazily flushed TLB,
		 * or some other CPU has already fixed the PTE after the
		 * current CPU took the fault.
		 *
		 * Need not check the access of upper level table entries since
		 * they are always ACC_ALL.
		 */
		if (is_access_allowed(error_code, spte)) {
			ret = RET_PF_SPURIOUS;
			break;
		}

		new_spte = spte;

		if (is_access_track_spte(spte))
			new_spte = restore_acc_track_spte(new_spte);

		/*
		 * Currently, to simplify the code, write-protection can
		 * be removed in the fast path only if the SPTE was
		 * write-protected for dirty-logging or access tracking.
		 */
		if ((error_code & PFERR_WRITE_MASK) &&
		    spte_can_locklessly_be_made_writable(spte)) {
			new_spte |= PT_WRITABLE_MASK;

			/*
			 * Do not fix write-permission on the large spte.  Since
			 * we only dirty the first page into the dirty-bitmap in
			 * fast_pf_fix_direct_spte(), other pages are missed
			 * if its slot has dirty logging enabled.
			 *
			 * Instead, we let the slow page fault path create a
			 * normal spte to fix the access.
			 *
			 * See the comments in kvm_arch_commit_memory_region().
			 */
			if (sp->role.level > PG_LEVEL_4K)
				break;
		}

		/* Verify that the fault can be handled in the fast path */
		if (new_spte == spte ||
		    !is_access_allowed(error_code, new_spte))
			break;

		/*
		 * Currently, fast page fault only works for direct mapping
		 * since the gfn is not stable for indirect shadow page. See
		 * Documentation/virt/kvm/locking.rst to get more detail.
		 */
		if (fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte,
					    new_spte)) {
			ret = RET_PF_FIXED;
			break;
		}

		if (++retry_count > 4) {
			printk_once(KERN_WARNING
				"kvm: Fast #PF retrying more than 4 times.\n");
			break;
		}

	} while (true);

	trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
			      spte, ret);
	walk_shadow_page_lockless_end(vcpu);

	return ret;
}

static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
			       struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(*root_hpa))
		return;

	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);

	if (kvm_mmu_put_root(kvm, sp)) {
		if (sp->tdp_mmu_page)
			kvm_tdp_mmu_free_root(kvm, sp);
		else if (sp->role.invalid)
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
	}

	*root_hpa = INVALID_PAGE;
}

/* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			ulong roots_to_free)
{
	struct kvm *kvm = vcpu->kvm;
	int i;
	LIST_HEAD(invalid_list);
	bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;

	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);

	/* Before acquiring the MMU lock, see if we need to do any real work. */
	if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
			    VALID_PAGE(mmu->prev_roots[i].hpa))
				break;

		if (i == KVM_MMU_NUM_PREV_ROOTS)
			return;
	}

	spin_lock(&kvm->mmu_lock);

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
			mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
					   &invalid_list);

	if (free_active_root) {
		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
			mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
		} else if (mmu->pae_root) {
			for (i = 0; i < 4; ++i)
				if (mmu->pae_root[i] != 0)
					mmu_free_root_page(kvm,
							   &mmu->pae_root[i],
							   &invalid_list);
		}
		mmu->root_hpa = INVALID_PAGE;
		mmu->root_pgd = 0;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);

static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
	int ret = 0;

	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		ret = 1;
	}

	return ret;
}

static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
			    u8 level, bool direct)
{
	struct kvm_mmu_page *sp;

	spin_lock(&vcpu->kvm->mmu_lock);

	if (make_mmu_pages_available(vcpu)) {
		spin_unlock(&vcpu->kvm->mmu_lock);
		return INVALID_PAGE;
	}
	sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
	++sp->root_count;

	spin_unlock(&vcpu->kvm->mmu_lock);
	return __pa(sp->spt);
}

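/*
 * Allocate the root page(s) for a direct MMU: a single root for the TDP
 * MMU or for 4-level and higher paging, or four PAE page-directory roots
 * when the root level is PT32E.
 */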
static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{
	u8 shadow_root_level = vcpu->arch.mmu->shadow_root_level;
	hpa_t root;
	unsigned i;

	if (vcpu->kvm->arch.tdp_mmu_enabled) {
		root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);

		if (!VALID_PAGE(root))
			return -ENOSPC;
		vcpu->arch.mmu->root_hpa = root;
	} else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level,
				      true);

		if (!VALID_PAGE(root))
			return -ENOSPC;
		vcpu->arch.mmu->root_hpa = root;
	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
		for (i = 0; i < 4; ++i) {
			MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));

			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
					      i << 30, PT32_ROOT_LEVEL, true);
			if (!VALID_PAGE(root))
				return -ENOSPC;
			vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
		}
		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
	} else
		BUG();

	/* root_pgd is ignored for direct MMUs. */
	vcpu->arch.mmu->root_pgd = 0;

	return 0;
}

static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
{
	u64 pdptr, pm_mask;
	gfn_t root_gfn, root_pgd;
	hpa_t root;
	int i;

	root_pgd = vcpu->arch.mmu->get_guest_pgd(vcpu);
	root_gfn = root_pgd >> PAGE_SHIFT;

	if (mmu_check_root(vcpu, root_gfn))
		return 1;

	/*
	 * Do we shadow a long mode page table? If so we need to
	 * write-protect the guest's page table root.
	 */
	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
		MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa));

		root = mmu_alloc_root(vcpu, root_gfn, 0,
				      vcpu->arch.mmu->shadow_root_level, false);
		if (!VALID_PAGE(root))
			return -ENOSPC;
		vcpu->arch.mmu->root_hpa = root;
		goto set_root_pgd;
	}

	/*
	 * We shadow a 32-bit page table. This may be a legacy 2-level
	 * or a PAE 3-level page table. In either case we need to be aware that
	 * the shadow page table may be a PAE or a long mode page table.
	 */
	pm_mask = PT_PRESENT_MASK;
	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;

		/*
		 * Allocate the page for the PDPTEs when shadowing 32-bit NPT
		 * with 64-bit only when needed.  Unlike 32-bit NPT, it doesn't
		 * need to be in low mem.  See also lm_root below.
		 */
		if (!vcpu->arch.mmu->pae_root) {
			WARN_ON_ONCE(!tdp_enabled);

			vcpu->arch.mmu->pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
			if (!vcpu->arch.mmu->pae_root)
				return -ENOMEM;
		}
	}

	for (i = 0; i < 4; ++i) {
		MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
		if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
			pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
			if (!(pdptr & PT_PRESENT_MASK)) {
				vcpu->arch.mmu->pae_root[i] = 0;
				continue;
			}
			root_gfn = pdptr >> PAGE_SHIFT;
			if (mmu_check_root(vcpu, root_gfn))
				return 1;
		}

		root = mmu_alloc_root(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, false);
		if (!VALID_PAGE(root))
			return -ENOSPC;
		vcpu->arch.mmu->pae_root[i] = root | pm_mask;
	}
	vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);

	/*
	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
	 * tables are allocated and initialized at MMU creation as there is no
	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
	 * on demand, as running a 32-bit L1 VMM is very rare.  The PDP is
	 * handled above (to share logic with PAE), deal with the PML4 here.
	 */
	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
		if (vcpu->arch.mmu->lm_root == NULL) {
			u64 *lm_root;

			lm_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
			if (!lm_root)
				return -ENOMEM;

			lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;

			vcpu->arch.mmu->lm_root = lm_root;
		}

		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
	}

set_root_pgd:
	vcpu->arch.mmu->root_pgd = root_pgd;

	return 0;
}

3372 
mmu_alloc_roots(struct kvm_vcpu * vcpu)3373 static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
3374 {
3375 	if (vcpu->arch.mmu->direct_map)
3376 		return mmu_alloc_direct_roots(vcpu);
3377 	else
3378 		return mmu_alloc_shadow_roots(vcpu);
3379 }
3380 
kvm_mmu_sync_roots(struct kvm_vcpu * vcpu)3381 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3382 {
3383 	int i;
3384 	struct kvm_mmu_page *sp;
3385 
3386 	if (vcpu->arch.mmu->direct_map)
3387 		return;
3388 
3389 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
3390 		return;
3391 
3392 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3393 
3394 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
3395 		hpa_t root = vcpu->arch.mmu->root_hpa;
3396 		sp = to_shadow_page(root);
3397 
3398 		/*
3399 		 * Even if another CPU was marking the SP as unsync-ed
3400 		 * simultaneously, any guest page table changes are not
3401 		 * guaranteed to be visible anyway until this VCPU issues a TLB
3402 		 * flush strictly after those changes are made. We only need to
3403 		 * ensure that the other CPU sets these flags before any actual
3404 		 * changes to the page tables are made. The comments in
3405 		 * mmu_need_write_protect() describe what could go wrong if this
3406 		 * requirement isn't satisfied.
3407 		 */
3408 		if (!smp_load_acquire(&sp->unsync) &&
3409 		    !smp_load_acquire(&sp->unsync_children))
3410 			return;
3411 
3412 		spin_lock(&vcpu->kvm->mmu_lock);
3413 		kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3414 
3415 		mmu_sync_children(vcpu, sp);
3416 
3417 		kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3418 		spin_unlock(&vcpu->kvm->mmu_lock);
3419 		return;
3420 	}
3421 
3422 	spin_lock(&vcpu->kvm->mmu_lock);
3423 	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3424 
3425 	for (i = 0; i < 4; ++i) {
3426 		hpa_t root = vcpu->arch.mmu->pae_root[i];
3427 
3428 		if (root && VALID_PAGE(root)) {
3429 			root &= PT64_BASE_ADDR_MASK;
3430 			sp = to_shadow_page(root);
3431 			mmu_sync_children(vcpu, sp);
3432 		}
3433 	}
3434 
3435 	kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3436 	spin_unlock(&vcpu->kvm->mmu_lock);
3437 }
3438 EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
3439 
nonpaging_gva_to_gpa(struct kvm_vcpu * vcpu,gpa_t vaddr,u32 access,struct x86_exception * exception)3440 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
3441 				  u32 access, struct x86_exception *exception)
3442 {
3443 	if (exception)
3444 		exception->error_code = 0;
3445 	return vaddr;
3446 }
3447 
nonpaging_gva_to_gpa_nested(struct kvm_vcpu * vcpu,gpa_t vaddr,u32 access,struct x86_exception * exception)3448 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
3449 					 u32 access,
3450 					 struct x86_exception *exception)
3451 {
3452 	if (exception)
3453 		exception->error_code = 0;
3454 	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3455 }
3456 
3457 static bool
__is_rsvd_bits_set(struct rsvd_bits_validate * rsvd_check,u64 pte,int level)3458 __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
3459 {
3460 	int bit7 = (pte >> 7) & 1;
3461 
3462 	return pte & rsvd_check->rsvd_bits_mask[bit7][level-1];
3463 }
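
/*
 * Worked example (illustrative, not part of the original source): for a
 * present 2MB mapping, the PDE has bit 7 (PS) set, so bit7 == 1 and the
 * entry is checked against rsvd_bits_mask[1][1], the "large page" mask set
 * up by __reset_rsvds_bits_mask(); a 4K PTE at level 1, where bit 7 is the
 * PAT bit rather than PS, is checked against rsvd_bits_mask[0][0] or
 * rsvd_bits_mask[1][0], which are identical.
 */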

static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte)
{
	return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
}

static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
	/*
	 * A nested guest cannot use the MMIO cache if it is using nested
	 * page tables, because cr2 is a nGPA while the cache stores GPAs.
	 */
	if (mmu_is_nested(vcpu))
		return false;

	if (direct)
		return vcpu_match_mmio_gpa(vcpu, addr);

	return vcpu_match_mmio_gva(vcpu, addr);
}

/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 */
static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
{
	struct kvm_shadow_walk_iterator iterator;
	int leaf = -1;
	u64 spte;

	walk_shadow_page_lockless_begin(vcpu);

	for (shadow_walk_init(&iterator, vcpu, addr),
	     *root_level = iterator.level;
	     shadow_walk_okay(&iterator);
	     __shadow_walk_next(&iterator, spte)) {
		leaf = iterator.level;
		spte = mmu_spte_get_lockless(iterator.sptep);

		sptes[leaf - 1] = spte;

		if (!is_shadow_present_pte(spte))
			break;
	}

	walk_shadow_page_lockless_end(vcpu);

	return leaf;
}

/* Return true if a reserved bit is detected in the SPTE. */
static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
{
	u64 sptes[PT64_ROOT_MAX_LEVEL];
	struct rsvd_bits_validate *rsvd_check;
	int root, leaf, level;
	bool reserved = false;

	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) {
		*sptep = 0ull;
		return reserved;
	}

	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
	else
		leaf = get_walk(vcpu, addr, sptes, &root);

	if (unlikely(leaf < 0)) {
		*sptep = 0ull;
		return reserved;
	}

	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;

	for (level = root; level >= leaf; level--) {
		if (!is_shadow_present_pte(sptes[level - 1]))
			break;
		/*
		 * Use a bitwise-OR instead of a logical-OR to aggregate the
		 * reserved bit and EPT's invalid memtype/XWR checks to avoid
		 * adding a Jcc in the loop.
		 */
		reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level - 1]) ||
			    __is_rsvd_bits_set(rsvd_check, sptes[level - 1],
					       level);
	}

	if (reserved) {
		pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
		       __func__, addr);
		for (level = root; level >= leaf; level--)
			pr_err("------ spte 0x%llx level %d.\n",
			       sptes[level - 1], level);
	}

	*sptep = sptes[leaf - 1];

	return reserved;
}

static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
	u64 spte;
	bool reserved;

	if (mmio_info_in_cache(vcpu, addr, direct))
		return RET_PF_EMULATE;

	reserved = get_mmio_spte(vcpu, addr, &spte);
	if (WARN_ON(reserved))
		return -EINVAL;

	if (is_mmio_spte(spte)) {
		gfn_t gfn = get_mmio_spte_gfn(spte);
		unsigned int access = get_mmio_spte_access(spte);

		if (!check_mmio_spte(vcpu, spte))
			return RET_PF_INVALID;

		if (direct)
			addr = 0;

		trace_handle_mmio_page_fault(addr, gfn, access);
		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
		return RET_PF_EMULATE;
	}

	/*
	 * If the page table was zapped by another CPU, let the CPU fault
	 * again on the address.
	 */
	return RET_PF_RETRY;
}

static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
					 u32 error_code, gfn_t gfn)
{
	if (unlikely(error_code & PFERR_RSVD_MASK))
		return false;

	if (!(error_code & PFERR_PRESENT_MASK) ||
	      !(error_code & PFERR_WRITE_MASK))
		return false;

	/*
	 * The guest is writing a page that is write-tracked, which cannot
	 * be fixed by the page fault handler.
	 */
	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
		return true;

	return false;
}

static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 spte;

	walk_shadow_page_lockless_begin(vcpu);
	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
		clear_sp_write_flooding_count(iterator.sptep);
		if (!is_shadow_present_pte(spte))
			break;
	}
	walk_shadow_page_lockless_end(vcpu);
}

static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				    gfn_t gfn)
{
	struct kvm_arch_async_pf arch;

	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
	arch.gfn = gfn;
	arch.direct_map = vcpu->arch.mmu->direct_map;
	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);

	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}

static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
			 bool *writable)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	bool async;

	/*
	 * Retry the page fault if the gfn hit a memslot that is being deleted
	 * or moved.  This ensures any existing SPTEs for the old memslot will
	 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
	 */
	if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
		return true;

	/* Don't expose private memslots to L2. */
	if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
		*pfn = KVM_PFN_NOSLOT;
		*writable = false;
		return false;
	}

	async = false;
	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
	if (!async)
		return false; /* *pfn has correct page already */

	if (!prefault && kvm_can_do_async_pf(vcpu)) {
		trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
			trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
			return true;
		} else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
			return true;
	}

	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
	return false;
}

static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
			     bool prefault, int max_level, bool is_tdp)
{
	bool write = error_code & PFERR_WRITE_MASK;
	bool map_writable;

	gfn_t gfn = gpa >> PAGE_SHIFT;
	unsigned long mmu_seq;
	kvm_pfn_t pfn;
	int r;

	if (page_fault_handle_page_track(vcpu, error_code, gfn))
		return RET_PF_EMULATE;

	if (!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) {
		r = fast_page_fault(vcpu, gpa, error_code);
		if (r != RET_PF_INVALID)
			return r;
	}

	r = mmu_topup_memory_caches(vcpu, false);
	if (r)
		return r;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
		return RET_PF_RETRY;

	if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
		return r;

	r = RET_PF_RETRY;
	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;
	r = make_mmu_pages_available(vcpu);
	if (r)
		goto out_unlock;

	if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
		r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
				    pfn, prefault);
	else
		r = __direct_map(vcpu, gpa, error_code, map_writable, max_level, pfn,
				 prefault, is_tdp);

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return r;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
				u32 error_code, bool prefault)
{
	pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);

	/* This path builds a PAE page table; we can map 2MB pages at most. */
	return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault,
				 PG_LEVEL_2M, false);
}

int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
				u64 fault_address, char *insn, int insn_len)
{
	int r = 1;
	u32 flags = vcpu->arch.apf.host_apf_flags;

#ifndef CONFIG_X86_64
	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
	if (WARN_ON_ONCE(fault_address >> 32))
		return -EFAULT;
#endif

	vcpu->arch.l1tf_flush_l1d = true;
	if (!flags) {
		trace_kvm_page_fault(fault_address, error_code);

		if (kvm_event_needs_reinjection(vcpu))
			kvm_mmu_unprotect_page_virt(vcpu, fault_address);
		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
				insn_len);
	} else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
		vcpu->arch.apf.host_apf_flags = 0;
		local_irq_disable();
		kvm_async_pf_task_wait_schedule(fault_address);
		local_irq_enable();
	} else {
		WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvm_handle_page_fault);

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		       bool prefault)
{
	int max_level;

	for (max_level = KVM_MAX_HUGEPAGE_LEVEL;
	     max_level > PG_LEVEL_4K;
	     max_level--) {
		int page_num = KVM_PAGES_PER_HPAGE(max_level);
		gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1);

		if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
			break;
	}

	return direct_page_fault(vcpu, gpa, error_code, prefault,
				 max_level, true);
}

static void nonpaging_init_context(struct kvm_vcpu *vcpu,
				   struct kvm_mmu *context)
{
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = NULL;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->direct_map = true;
	context->nx = false;
}

static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
				  union kvm_mmu_page_role role)
{
	return (role.direct || pgd == root->pgd) &&
	       VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) &&
	       role.word == to_shadow_page(root->hpa)->role.word;
}

/*
 * Find out if a previously cached root matching the new pgd/role is available.
 * The current root is also inserted into the cache.
 * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
 * returned.
 * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
 * false is returned. This root should now be freed by the caller.
 */
static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd,
				  union kvm_mmu_page_role new_role)
{
	uint i;
	struct kvm_mmu_root_info root;
	struct kvm_mmu *mmu = vcpu->arch.mmu;

	root.pgd = mmu->root_pgd;
	root.hpa = mmu->root_hpa;

	if (is_root_usable(&root, new_pgd, new_role))
		return true;

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		swap(root, mmu->prev_roots[i]);

		if (is_root_usable(&root, new_pgd, new_role))
			break;
	}

	mmu->root_hpa = root.hpa;
	mmu->root_pgd = root.pgd;

	return i < KVM_MMU_NUM_PREV_ROOTS;
}
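
/*
 * Illustrative example (not in the original source): with
 * KVM_MMU_NUM_PREV_ROOTS == 3 and a match at i == 1, the swap() calls in
 * cached_root_available() leave the old current root in prev_roots[0] and
 * the matched root in "root", which becomes the new mmu->root_hpa.  On a
 * complete miss (i == 3), "root" ends up holding the least recently used
 * entry and false is returned, so the caller frees that root.
 */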

static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
			    union kvm_mmu_page_role new_role)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;

	/*
	 * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
	 * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
	 * later if necessary.
	 */
	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
	    mmu->root_level >= PT64_ROOT_4LEVEL)
		return cached_root_available(vcpu, new_pgd, new_role);

	return false;
}

static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
			      union kvm_mmu_page_role new_role,
			      bool skip_tlb_flush, bool skip_mmu_sync)
{
	if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
		return;
	}

	/*
	 * It's possible that the cached previous root page is obsolete because
	 * of a change in the MMU generation number. However, changing the
	 * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will
	 * free the root set here and allocate a new one.
	 */
	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);

	if (!skip_mmu_sync || force_flush_and_sync_on_reuse)
		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
	if (!skip_tlb_flush || force_flush_and_sync_on_reuse)
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);

	/*
	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
	 * switching to a new CR3, that GVA->GPA mapping may no longer be
	 * valid. So clear any cached MMIO info even when we don't need to sync
	 * the shadow page tables.
	 */
	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);

	/*
	 * If this is a direct root page, it doesn't have a write flooding
	 * count. Otherwise, clear the write flooding count.
	 */
	if (!new_role.direct)
		__clear_sp_write_flooding_count(
				to_shadow_page(vcpu->arch.mmu->root_hpa));
}

void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
		     bool skip_mmu_sync)
{
	__kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu),
			  skip_tlb_flush, skip_mmu_sync);
}
EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);

static unsigned long get_cr3(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr3(vcpu);
}

static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
			   unsigned int access, int *nr_present)
{
	if (unlikely(is_mmio_spte(*sptep))) {
		if (gfn != get_mmio_spte_gfn(*sptep)) {
			mmu_spte_clear_no_track(sptep);
			return true;
		}

		(*nr_present)++;
		mark_mmio_spte(vcpu, sptep, gfn, access);
		return true;
	}

	return false;
}

static inline bool is_last_gpte(struct kvm_mmu *mmu,
				unsigned level, unsigned gpte)
{
	/*
	 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
	 * If it is clear, there are no large pages at this level, so clear
	 * PT_PAGE_SIZE_MASK in gpte if that is the case.
	 */
	gpte &= level - mmu->last_nonleaf_level;

	/*
	 * PG_LEVEL_4K always terminates.  The RHS has bit 7 set
	 * iff level <= PG_LEVEL_4K, which for our purpose means
	 * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
	 */
	gpte |= level - PG_LEVEL_4K - 1;

	return gpte & PT_PAGE_SIZE_MASK;
}
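
/*
 * Worked example (illustrative, not part of the original source): with a
 * 64-bit guest, last_nonleaf_level == 4.  For a PDE at level 2, the first
 * RHS is 2 - 4 == 0xfffffffe (unsigned), which has bit 7 set, so an
 * existing PS bit in the gpte survives the AND; the second RHS is
 * 2 - 1 - 1 == 0, a no-op.  For a PTE at level 1, the second RHS is
 * 1 - 1 - 1 == 0xffffffff, which forces PT_PAGE_SIZE_MASK on, so level 1
 * always reads as a leaf.
 */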

#define PTTYPE_EPT 18 /* arbitrary */
#define PTTYPE PTTYPE_EPT
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static void
__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
			struct rsvd_bits_validate *rsvd_check,
			int maxphyaddr, int level, bool nx, bool gbpages,
			bool pse, bool amd)
{
	u64 exb_bit_rsvd = 0;
	u64 gbpages_bit_rsvd = 0;
	u64 nonleaf_bit8_rsvd = 0;

	rsvd_check->bad_mt_xwr = 0;

	if (!nx)
		exb_bit_rsvd = rsvd_bits(63, 63);
	if (!gbpages)
		gbpages_bit_rsvd = rsvd_bits(7, 7);

	/*
	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
	 * leaf entries) on AMD CPUs only.
	 */
	if (amd)
		nonleaf_bit8_rsvd = rsvd_bits(8, 8);

	switch (level) {
	case PT32_ROOT_LEVEL:
		/* no rsvd bits for 2-level 4K page table entries */
		rsvd_check->rsvd_bits_mask[0][1] = 0;
		rsvd_check->rsvd_bits_mask[0][0] = 0;
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];

		if (!pse) {
			rsvd_check->rsvd_bits_mask[1][1] = 0;
			break;
		}

		if (is_cpuid_PSE36())
			/* 36-bit PSE 4MB page */
			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
		else
			/* 32-bit PSE 4MB page */
			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
		break;
	case PT32E_ROOT_LEVEL:
		rsvd_check->rsvd_bits_mask[0][2] =
			rsvd_bits(maxphyaddr, 63) |
			rsvd_bits(5, 8) | rsvd_bits(1, 2);	/* PDPTE */
		rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62);	/* PDE */
		rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62);	/* PTE */
		rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62) |
			rsvd_bits(13, 20);		/* large page */
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
		break;
	case PT64_ROOT_5LEVEL:
		rsvd_check->rsvd_bits_mask[0][4] = exb_bit_rsvd |
			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[1][4] =
			rsvd_check->rsvd_bits_mask[0][4];
		fallthrough;
	case PT64_ROOT_4LEVEL:
		rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
			gbpages_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[1][3] =
			rsvd_check->rsvd_bits_mask[0][3];
		rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd |
			gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 29);
		rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 20);		/* large page */
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
		break;
	}
}
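
/*
 * Worked example (illustrative, not part of the original source): for a
 * 4-level guest with maxphyaddr == 48, EFER.NX == 1 and 1GB pages
 * supported, a present 2MB PDE (bit 7 set, so checked against
 * rsvd_bits_mask[1][1]) must have bits 48-51 and 13-20 clear; a non-leaf
 * PML4E (rsvd_bits_mask[0][3]) must additionally have bit 7 clear, since
 * PML4Es have no PS bit.
 */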

static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
				cpuid_maxphyaddr(vcpu), context->root_level,
				context->nx,
				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
				is_pse(vcpu),
				guest_cpuid_is_amd_or_hygon(vcpu));
}

static void
__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
			    int maxphyaddr, bool execonly)
{
	u64 bad_mt_xwr;

	rsvd_check->rsvd_bits_mask[0][4] =
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
	rsvd_check->rsvd_bits_mask[0][3] =
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
	rsvd_check->rsvd_bits_mask[0][2] =
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
	rsvd_check->rsvd_bits_mask[0][1] =
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
	rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);

	/* large page */
	rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
	rsvd_check->rsvd_bits_mask[1][2] =
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29);
	rsvd_check->rsvd_bits_mask[1][1] =
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20);
	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];

	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
	if (!execonly) {
		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
	}
	rsvd_check->bad_mt_xwr = bad_mt_xwr;
}
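
/*
 * Illustrative example (not part of the original source): __is_bad_mt_xwr()
 * indexes bad_mt_xwr with the low 6 bits of the PTE, i.e. memtype (bits
 * 3..5) in the high three index bits and XWR (bits 0..2) in the low three.
 * A write-only EPT leaf with memtype WB (6) yields index (6 << 3) | 2 == 50;
 * REPEAT_BYTE(1ull << 2) sets every bit congruent to 2 mod 8, including
 * bit 50, so the entry is flagged as bad.
 */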

static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
		struct kvm_mmu *context, bool execonly)
{
	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
				    cpuid_maxphyaddr(vcpu), execonly);
}

/*
 * The page table on the host is the shadow page table for the page
 * table in the guest or AMD nested guest; its MMU features completely
 * follow the features in the guest.
 */
void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
	/*
	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
	 * The iTLB multi-hit workaround can be toggled at any time, so assume
	 * NX can be used by any non-nested shadow MMU to avoid having to reset
	 * MMU contexts.  Note, KVM forces EFER.NX=1 when TDP is disabled.
	 */
	bool uses_nx = context->nx || !tdp_enabled ||
		context->mmu_role.base.smep_andnot_wp;
	struct rsvd_bits_validate *shadow_zero_check;
	int i;

	/*
	 * Passing "true" to the last argument is okay; it adds a check
	 * on bit 8 of the SPTEs which KVM doesn't use anyway.
	 */
	shadow_zero_check = &context->shadow_zero_check;
	__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
				shadow_phys_bits,
				context->shadow_root_level, uses_nx,
				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
				is_pse(vcpu), true);

	if (!shadow_me_mask)
		return;

	for (i = context->shadow_root_level; --i >= 0;) {
		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
	}
}
EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);

static inline bool boot_cpu_is_amd(void)
{
	WARN_ON_ONCE(!tdp_enabled);
	return shadow_x_mask == 0;
}

/*
 * This is the direct page table on the host; use as many MMU features as
 * possible. However, KVM currently does not do execution-protection here.
 */
static void
reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
				struct kvm_mmu *context)
{
	struct rsvd_bits_validate *shadow_zero_check;
	int i;

	shadow_zero_check = &context->shadow_zero_check;

	if (boot_cpu_is_amd())
		__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
					shadow_phys_bits,
					context->shadow_root_level, false,
					boot_cpu_has(X86_FEATURE_GBPAGES),
					true, true);
	else
		__reset_rsvds_bits_mask_ept(shadow_zero_check,
					    shadow_phys_bits,
					    false);

	if (!shadow_me_mask)
		return;

	for (i = context->shadow_root_level; --i >= 0;) {
		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
	}
}

/*
 * Same as the comments in reset_shadow_zero_bits_mask(), except this is
 * the shadow page table for an Intel nested guest.
 */
static void
reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
				struct kvm_mmu *context, bool execonly)
{
	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
				    shadow_phys_bits, execonly);
}

#define BYTE_MASK(access) \
	((1 & (access) ? 2 : 0) | \
	 (2 & (access) ? 4 : 0) | \
	 (3 & (access) ? 8 : 0) | \
	 (4 & (access) ? 16 : 0) | \
	 (5 & (access) ? 32 : 0) | \
	 (6 & (access) ? 64 : 0) | \
	 (7 & (access) ? 128 : 0))
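
/*
 * Worked example (illustrative, not part of the original source): bit i of
 * BYTE_MASK(access) is set iff the 3-bit UWX combination i contains the
 * given access bit.  E.g. BYTE_MASK(ACC_WRITE_MASK), with
 * ACC_WRITE_MASK == 2, evaluates to 0xcc (bits 2, 3, 6 and 7): exactly the
 * combinations whose write bit is set.
 */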


static void update_permission_bitmask(struct kvm_vcpu *vcpu,
				      struct kvm_mmu *mmu, bool ept)
{
	unsigned byte;

	const u8 x = BYTE_MASK(ACC_EXEC_MASK);
	const u8 w = BYTE_MASK(ACC_WRITE_MASK);
	const u8 u = BYTE_MASK(ACC_USER_MASK);

	bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0;
	bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0;
	bool cr0_wp = is_write_protection(vcpu);

	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
		unsigned pfec = byte << 1;

		/*
		 * Each "*f" variable has a 1 bit for each UWX value
		 * that causes a fault with the given PFEC.
		 */

		/* Faults from writes to non-writable pages */
		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
		/* Faults from user mode accesses to supervisor pages */
		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
		/* Faults from fetches of non-executable pages */
		u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
		/* Faults from kernel mode fetches of user pages */
		u8 smepf = 0;
		/* Faults from kernel mode accesses of user pages */
		u8 smapf = 0;

		if (!ept) {
			/* Faults from kernel mode accesses to user pages */
			u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;

			/* Not really needed: !nx will cause pte.nx to fault */
			if (!mmu->nx)
				ff = 0;

			/* Allow supervisor writes if !cr0.wp */
			if (!cr0_wp)
				wf = (pfec & PFERR_USER_MASK) ? wf : 0;

			/* Disallow supervisor fetches of user code if cr4.smep */
			if (cr4_smep)
				smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;

			/*
			 * SMAP:kernel-mode data accesses from user-mode
			 * mappings should fault. A fault is considered
			 * as a SMAP violation if all of the following
			 * conditions are true:
			 *   - X86_CR4_SMAP is set in CR4
			 *   - A user page is accessed
			 *   - The access is not a fetch
			 *   - Page fault in kernel mode
			 *   - if CPL = 3 or X86_EFLAGS_AC is clear
			 *
			 * Here, we cover the first three conditions.
			 * The fourth is computed dynamically in permission_fault();
			 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
			 * *not* subject to SMAP restrictions.
			 */
			if (cr4_smap)
				smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
		}

		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
	}
}
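
/*
 * Illustrative example (not part of the original source): permissions[] is
 * indexed by pfec >> 1.  For a user-mode write fault,
 * pfec == PFERR_USER_MASK | PFERR_WRITE_MASK == 6, so byte == 3; bit i of
 * permissions[3] is then set for every UWX pte-access combination i that
 * would fault, i.e. every combination lacking the user or write bit.
 */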

/*
 * PKU is an additional mechanism by which paging controls access to
 * user-mode addresses based on the value in the PKRU register.  Protection
 * key violations are reported through a bit in the page fault error code.
 * Unlike other bits of the error code, the PK bit is not known at the
 * call site of e.g. gva_to_gpa; it must be computed directly in
 * permission_fault based on two bits of PKRU, on some machine state (CR4,
 * CR0, EFER, CPL), and on other bits of the error code and the page tables.
 *
 * In particular the following conditions come from the error code, the
 * page tables and the machine state:
 * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
 * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
 * - PK is always zero if U=0 in the page tables
 * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
 *
 * The PKRU bitmask caches the result of these four conditions.  The error
 * code (minus the P bit) and the page table's U bit form an index into the
 * PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
 * with the two bits of the PKRU register corresponding to the protection key.
 * For the first three conditions above the bits will be 00, thus masking
 * away both AD and WD.  For all reads or if the last condition holds, WD
 * only will be masked away.
 */
static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				bool ept)
{
	unsigned bit;
	bool wp;

	if (ept) {
		mmu->pkru_mask = 0;
		return;
	}

	/* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */
	if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) {
		mmu->pkru_mask = 0;
		return;
	}

	wp = is_write_protection(vcpu);

	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
		unsigned pfec, pkey_bits;
		bool check_pkey, check_write, ff, uf, wf, pte_user;

		pfec = bit << 1;
		ff = pfec & PFERR_FETCH_MASK;
		uf = pfec & PFERR_USER_MASK;
		wf = pfec & PFERR_WRITE_MASK;

		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
		pte_user = pfec & PFERR_RSVD_MASK;

		/*
		 * Only need to check an access that is not an instruction
		 * fetch and is to a user page.
		 */
		check_pkey = (!ff && pte_user);
		/*
		 * Write access is controlled by PKRU if it is a user access
		 * or CR0.WP = 1.
		 */
		check_write = check_pkey && wf && (uf || wp);

		/* PKRU.AD stops both read and write access. */
		pkey_bits = !!check_pkey;
		/* PKRU.WD stops write access. */
		pkey_bits |= (!!check_write) << 1;

		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
	}
}
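
/*
 * Worked example (illustrative, not part of the original source): for a
 * supervisor write to a user page with CR0.WP=0, check_pkey is true but
 * check_write is false (neither uf nor wp holds), so pkey_bits == 1: only
 * PKRU.AD can fault the access, matching the last condition in the comment
 * block above.
 */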

static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{
	unsigned root_level = mmu->root_level;

	mmu->last_nonleaf_level = root_level;
	if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
		mmu->last_nonleaf_level++;
}
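
/*
 * Illustrative example (not part of the original source): a 64-bit guest
 * gets last_nonleaf_level == 4, so is_last_gpte() honors the PS bit at
 * levels 1-3; a 32-bit guest with CR4.PSE=1 gets 2 + 1 == 3, enabling 4MB
 * pages at level 2, while without PSE only level-1 PTEs are leaves.
 */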

static void paging64_init_context_common(struct kvm_vcpu *vcpu,
					 struct kvm_mmu *context,
					 int level)
{
	context->nx = is_nx(vcpu);
	context->root_level = level;

	reset_rsvds_bits_mask(vcpu, context);
	update_permission_bitmask(vcpu, context, false);
	update_pkru_bitmask(vcpu, context, false);
	update_last_nonleaf_level(vcpu, context);

	MMU_WARN_ON(!is_pae(vcpu));
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->sync_page = paging64_sync_page;
	context->invlpg = paging64_invlpg;
	context->shadow_root_level = level;
	context->direct_map = false;
}

static void paging64_init_context(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	int root_level = is_la57_mode(vcpu) ?
			 PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;

	paging64_init_context_common(vcpu, context, root_level);
}

static void paging32_init_context(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	context->nx = false;
	context->root_level = PT32_ROOT_LEVEL;

	reset_rsvds_bits_mask(vcpu, context);
	update_permission_bitmask(vcpu, context, false);
	update_pkru_bitmask(vcpu, context, false);
	update_last_nonleaf_level(vcpu, context);

	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->sync_page = paging32_sync_page;
	context->invlpg = paging32_invlpg;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->direct_map = false;
}

static void paging32E_init_context(struct kvm_vcpu *vcpu,
				   struct kvm_mmu *context)
{
	paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
}

static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_extended_role ext = {0};

	ext.cr0_pg = !!is_paging(vcpu);
	ext.cr4_pae = !!is_pae(vcpu);
	ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
	ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
	ext.cr4_pse = !!is_pse(vcpu);
	ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
	ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
	ext.maxphyaddr = cpuid_maxphyaddr(vcpu);

	ext.valid = 1;

	return ext;
}

static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
						   bool base_only)
{
	union kvm_mmu_role role = {0};

	role.base.access = ACC_ALL;
	role.base.nxe = !!is_nx(vcpu);
	role.base.cr0_wp = is_write_protection(vcpu);
	role.base.smm = is_smm(vcpu);
	role.base.guest_mode = is_guest_mode(vcpu);

	if (base_only)
		return role;

	role.ext = kvm_calc_mmu_role_ext(vcpu);

	return role;
}

static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
{
	/* Use 5-level TDP if and only if it's useful/necessary. */
	if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
		return 4;

	return max_tdp_level;
}
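
/*
 * Illustrative example (not part of the original source): on a host with
 * max_tdp_level == 5, a guest whose CPUID reports maxphyaddr == 46 fits
 * entirely within a 4-level walk, so the cheaper 4-level TDP is used; a
 * guest with maxphyaddr == 52 requires the full 5 levels.
 */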

static union kvm_mmu_role
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
{
	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);

	role.base.ad_disabled = (shadow_accessed_mask == 0);
	role.base.level = kvm_mmu_get_tdp_level(vcpu);
	role.base.direct = true;
	role.base.gpte_is_8_bytes = true;

	return role;
}

static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.root_mmu;
	union kvm_mmu_role new_role =
		kvm_calc_tdp_mmu_root_page_role(vcpu, false);

	if (new_role.as_u64 == context->mmu_role.as_u64)
		return;

	context->mmu_role.as_u64 = new_role.as_u64;
	context->page_fault = kvm_tdp_page_fault;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = NULL;
	context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
	context->direct_map = true;
	context->get_guest_pgd = get_cr3;
	context->get_pdptr = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;

	if (!is_paging(vcpu)) {
		context->nx = false;
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
		context->nx = is_nx(vcpu);
		context->root_level = is_la57_mode(vcpu) ?
				PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging64_gva_to_gpa;
	} else if (is_pae(vcpu)) {
		context->nx = is_nx(vcpu);
		context->root_level = PT32E_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging64_gva_to_gpa;
	} else {
		context->nx = false;
		context->root_level = PT32_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging32_gva_to_gpa;
	}

	update_permission_bitmask(vcpu, context, false);
	update_pkru_bitmask(vcpu, context, false);
	update_last_nonleaf_level(vcpu, context);
	reset_tdp_shadow_zero_bits_mask(vcpu, context);
}

static union kvm_mmu_role
kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, bool base_only)
{
	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);

	role.base.smep_andnot_wp = role.ext.cr4_smep &&
		!is_write_protection(vcpu);
	role.base.smap_andnot_wp = role.ext.cr4_smap &&
		!is_write_protection(vcpu);
	role.base.gpte_is_8_bytes = !!is_pae(vcpu);

	return role;
}

static union kvm_mmu_role
kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
{
	union kvm_mmu_role role =
		kvm_calc_shadow_root_page_role_common(vcpu, base_only);

	role.base.direct = !is_paging(vcpu);

	if (!is_long_mode(vcpu))
		role.base.level = PT32E_ROOT_LEVEL;
	else if (is_la57_mode(vcpu))
		role.base.level = PT64_ROOT_5LEVEL;
	else
		role.base.level = PT64_ROOT_4LEVEL;

	return role;
}

static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
				    u32 cr0, u32 cr4, u32 efer,
				    union kvm_mmu_role new_role)
{
	if (!(cr0 & X86_CR0_PG))
		nonpaging_init_context(vcpu, context);
	else if (efer & EFER_LMA)
		paging64_init_context(vcpu, context);
	else if (cr4 & X86_CR4_PAE)
		paging32E_init_context(vcpu, context);
	else
		paging32_init_context(vcpu, context);

	context->mmu_role.as_u64 = new_role.as_u64;
	reset_shadow_zero_bits_mask(vcpu, context);
}

static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
{
	struct kvm_mmu *context = &vcpu->arch.root_mmu;
	union kvm_mmu_role new_role =
		kvm_calc_shadow_mmu_root_page_role(vcpu, false);

	if (new_role.as_u64 != context->mmu_role.as_u64)
		shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
}

static union kvm_mmu_role
kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_role role =
		kvm_calc_shadow_root_page_role_common(vcpu, false);

	role.base.direct = false;
	role.base.level = kvm_mmu_get_tdp_level(vcpu);

	return role;
}

void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
			     gpa_t nested_cr3)
{
	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
	union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);

	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);

	if (new_role.as_u64 != context->mmu_role.as_u64) {
		shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);

		/*
		 * Override the level set by the common init helper, nested TDP
		 * always uses the host's TDP configuration.
		 */
		context->shadow_root_level = new_role.base.level;
	}
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);

static union kvm_mmu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
				   bool execonly, u8 level)
{
	union kvm_mmu_role role = {0};

	/* SMM flag is inherited from root_mmu */
	role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;

	role.base.level = level;
	role.base.gpte_is_8_bytes = true;
	role.base.direct = false;
	role.base.ad_disabled = !accessed_dirty;
	role.base.guest_mode = true;
	role.base.access = ACC_ALL;

	/*
	 * WP=1 and NOT_WP=1 is an impossible combination, use WP and the
	 * SMAP variation to denote shadow EPT entries.
	 */
	role.base.cr0_wp = true;
	role.base.smap_andnot_wp = true;

	role.ext = kvm_calc_mmu_role_ext(vcpu);
	role.ext.execonly = execonly;

	return role;
}

void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty, gpa_t new_eptp)
{
	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
	u8 level = vmx_eptp_page_walk_level(new_eptp);
	union kvm_mmu_role new_role =
		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
						   execonly, level);

	__kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base, true, true);

	if (new_role.as_u64 == context->mmu_role.as_u64)
		return;

	context->shadow_root_level = level;

	context->nx = true;
	context->ept_ad = accessed_dirty;
	context->page_fault = ept_page_fault;
	context->gva_to_gpa = ept_gva_to_gpa;
	context->sync_page = ept_sync_page;
	context->invlpg = ept_invlpg;
	context->root_level = level;
	context->direct_map = false;
	context->mmu_role.as_u64 = new_role.as_u64;

	update_permission_bitmask(vcpu, context, true);
	update_pkru_bitmask(vcpu, context, true);
	update_last_nonleaf_level(vcpu, context);
	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);

static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.root_mmu;

	kvm_init_shadow_mmu(vcpu,
			    kvm_read_cr0_bits(vcpu, X86_CR0_PG),
			    kvm_read_cr4_bits(vcpu, X86_CR4_PAE),
			    vcpu->arch.efer);

	context->get_guest_pgd     = get_cr3;
	context->get_pdptr         = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;
}

static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_role role = kvm_calc_shadow_root_page_role_common(vcpu, false);

	/*
	 * Nested MMUs are used only for walking L2's gva->gpa, they never have
	 * shadow pages of their own and so "direct" has no meaning.  Set it
	 * to "true" to try to detect bogus usage of the nested MMU.
	 */
	role.base.direct = true;

	if (!is_paging(vcpu))
		role.base.level = 0;
	else if (is_long_mode(vcpu))
		role.base.level = is_la57_mode(vcpu) ? PT64_ROOT_5LEVEL :
						       PT64_ROOT_4LEVEL;
	else if (is_pae(vcpu))
		role.base.level = PT32E_ROOT_LEVEL;
	else
		role.base.level = PT32_ROOT_LEVEL;

	return role;
}

static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu);
	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;

	if (new_role.as_u64 == g_context->mmu_role.as_u64)
		return;

	g_context->mmu_role.as_u64 = new_role.as_u64;
	g_context->get_guest_pgd     = get_cr3;
	g_context->get_pdptr         = kvm_pdptr_read;
	g_context->inject_page_fault = kvm_inject_page_fault;

	/*
	 * L2 page tables are never shadowed, so there is no need to sync
	 * SPTEs.
	 */
	g_context->invlpg            = NULL;

	/*
	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
	 * L1's nested page tables (e.g. EPT12). The nested translation
	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
	 * L2's page tables as the first level of translation and L1's
	 * nested page tables as the second level of translation. Basically
	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
	 */
	if (!is_paging(vcpu)) {
		g_context->nx = false;
		g_context->root_level = 0;
		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
	} else if (is_long_mode(vcpu)) {
		g_context->nx = is_nx(vcpu);
		g_context->root_level = is_la57_mode(vcpu) ?
					PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
		reset_rsvds_bits_mask(vcpu, g_context);
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	} else if (is_pae(vcpu)) {
		g_context->nx = is_nx(vcpu);
		g_context->root_level = PT32E_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, g_context);
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	} else {
		g_context->nx = false;
		g_context->root_level = PT32_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, g_context);
		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
	}

	update_permission_bitmask(vcpu, g_context, false);
	update_pkru_bitmask(vcpu, g_context, false);
	update_last_nonleaf_level(vcpu, g_context);
}

void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
{
	if (reset_roots) {
		uint i;

		vcpu->arch.mmu->root_hpa = INVALID_PAGE;

		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
			vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
	}

	if (mmu_is_nested(vcpu))
		init_kvm_nested_mmu(vcpu);
	else if (tdp_enabled)
		init_kvm_tdp_mmu(vcpu);
	else
		init_kvm_softmmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_init_mmu);

static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_role role;

	if (tdp_enabled)
		role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
	else
		role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);

	return role.base;
}

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	kvm_mmu_unload(vcpu);
	kvm_init_mmu(vcpu, true);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
	if (r)
		goto out;
	r = mmu_alloc_roots(vcpu);
	kvm_mmu_sync_roots(vcpu);
	if (r)
		goto out;
	kvm_mmu_load_pgd(vcpu);
	kvm_x86_ops.tlb_flush_current(vcpu);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);

static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= shadow_nx_mask;
	new ^= shadow_nx_mask;
	return (old & ~new & PT64_PERM_MASK) != 0;
}
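
/*
 * Illustrative example (not part of the original source): a change that
 * only grants permissions, e.g. setting the writable bit on an otherwise
 * identical SPTE, leaves (old & ~new & PT64_PERM_MASK) == 0, so stale TLB
 * entries on other CPUs are harmless and no remote flush is needed;
 * revoking a permission or changing the PFN forces the flush.
 */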

static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
				    int *bytes)
{
	u64 gentry = 0;
	int r;

	/*
	 * Assume that the pte write is on a page table of the same type
	 * as the current vcpu paging mode, since we update the sptes only
	 * when they have the same mode.
	 */
	if (is_pae(vcpu) && *bytes == 4) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		*gpa &= ~(gpa_t)7;
		*bytes = 8;
	}

	if (*bytes == 4 || *bytes == 8) {
		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
		if (r)
			gentry = 0;
	}

	return gentry;
}

/*
 * If we're seeing too many writes to a page, it may no longer be a page table,
 * or we may be forking, in which case it is better to unmap the page.
 */
static bool detect_write_flooding(struct kvm_mmu_page *sp)
{
	/*
	 * Skip write-flooding detection for SPs whose level is 1, because
	 * they can become unsync, in which case the guest page is not
	 * write-protected.
	 */
	if (sp->role.level == PG_LEVEL_4K)
		return false;

	atomic_inc(&sp->write_flooding_count);
	return atomic_read(&sp->write_flooding_count) >= 3;
}
4913 
4914 /*
4915  * Misaligned accesses are too much trouble to fix up; also, they usually
4916  * indicate a page is not used as a page table.
4917  */
4918 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
4919 				    int bytes)
4920 {
4921 	unsigned offset, pte_size, misaligned;
4922 
4923 	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
4924 		 gpa, bytes, sp->role.word);
4925 
4926 	offset = offset_in_page(gpa);
4927 	pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
4928 
4929 	/*
4930 	 * Sometimes the OS writes only the last byte to update status
4931 	 * bits; for example, Linux uses an andb instruction in clear_bit().
4932 	 */
4933 	if (!(offset & (pte_size - 1)) && bytes == 1)
4934 		return false;
4935 
4936 	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
4937 	misaligned |= bytes < 4;
4938 
4939 	return misaligned;
4940 }
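
/*
 * Worked example (hypothetical offsets, 8-byte gptes): a 4-byte write at
 * page offset 6 spans two entries and is flagged as misaligned:
 *
 *   (6 ^ (6 + 4 - 1)) & ~(8 - 1) == (6 ^ 9) & ~7 == 15 & ~7 == 8 != 0
 *
 * whereas a 4-byte write at offset 8 stays within a single entry:
 * (8 ^ 11) & ~7 == 0, and bytes >= 4, so it is considered aligned.
 */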
4941 
4942 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
4943 {
4944 	unsigned page_offset, quadrant;
4945 	u64 *spte;
4946 	int level;
4947 
4948 	page_offset = offset_in_page(gpa);
4949 	level = sp->role.level;
4950 	*nspte = 1;
4951 	if (!sp->role.gpte_is_8_bytes) {
4952 		page_offset <<= 1;	/* 32->64 */
4953 		/*
4954 		 * A 32-bit pde maps 4MB while the shadow pdes map
4955 		 * only 2MB.  So we need to double the offset again
4956 		 * and zap two pdes instead of one.
4957 		 */
4958 		if (level == PT32_ROOT_LEVEL) {
4959 			page_offset &= ~7; /* kill rounding error */
4960 			page_offset <<= 1;
4961 			*nspte = 2;
4962 		}
4963 		quadrant = page_offset >> PAGE_SHIFT;
4964 		page_offset &= ~PAGE_MASK;
4965 		if (quadrant != sp->role.quadrant)
4966 			return NULL;
4967 	}
4968 
4969 	spte = &sp->spt[page_offset / sizeof(*spte)];
4970 	return spte;
4971 }
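
/*
 * Worked example (hypothetical offsets): for 4-byte gptes and a level-1
 * sp, a write at page offset 0x804 is scaled to the shadow layout as
 * page_offset = 0x804 << 1 = 0x1008; quadrant = 0x1008 >> PAGE_SHIFT = 1
 * must then match sp->role.quadrant, because each shadow page covers
 * only half of the 1024 32-bit guest entries, and the resulting spte is
 * &sp->spt[0x008 / 8] == &sp->spt[1].
 */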
4972 
4973 static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
4974 			      const u8 *new, int bytes,
4975 			      struct kvm_page_track_notifier_node *node)
4976 {
4977 	gfn_t gfn = gpa >> PAGE_SHIFT;
4978 	struct kvm_mmu_page *sp;
4979 	LIST_HEAD(invalid_list);
4980 	u64 entry, gentry, *spte;
4981 	int npte;
4982 	bool remote_flush, local_flush;
4983 
4984 	/*
4985 	 * If we don't have indirect shadow pages, it means no page is
4986 	 * write-protected, so we can simply return.
4987 	 */
4988 	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
4989 		return;
4990 
4991 	remote_flush = local_flush = false;
4992 
4993 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
4994 
4995 	/*
4996 	 * No need to care whether the memory allocation is successful
4997 	 * or not, since pte prefetch is skipped if the cache does not
4998 	 * have enough objects.
4999 	 */
5000 	mmu_topup_memory_caches(vcpu, true);
5001 
5002 	spin_lock(&vcpu->kvm->mmu_lock);
5003 
5004 	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
5005 
5006 	++vcpu->kvm->stat.mmu_pte_write;
5007 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
5008 
5009 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
5010 		if (detect_write_misaligned(sp, gpa, bytes) ||
5011 		      detect_write_flooding(sp)) {
5012 			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
5013 			++vcpu->kvm->stat.mmu_flooded;
5014 			continue;
5015 		}
5016 
5017 		spte = get_written_sptes(sp, gpa, &npte);
5018 		if (!spte)
5019 			continue;
5020 
5021 		local_flush = true;
5022 		while (npte--) {
5023 			entry = *spte;
5024 			mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5025 			if (gentry && sp->role.level != PG_LEVEL_4K)
5026 				++vcpu->kvm->stat.mmu_pde_zapped;
5027 			if (need_remote_flush(entry, *spte))
5028 				remote_flush = true;
5029 			++spte;
5030 		}
5031 	}
5032 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
5033 	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
5034 	spin_unlock(&vcpu->kvm->mmu_lock);
5035 }
5036 
5037 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
5038 {
5039 	gpa_t gpa;
5040 	int r;
5041 
5042 	if (vcpu->arch.mmu->direct_map)
5043 		return 0;
5044 
5045 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
5046 
5047 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
5048 
5049 	return r;
5050 }
5051 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
5052 
5053 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
5054 		       void *insn, int insn_len)
5055 {
5056 	int r, emulation_type = EMULTYPE_PF;
5057 	bool direct = vcpu->arch.mmu->direct_map;
5058 
5059 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
5060 		return RET_PF_RETRY;
5061 
5062 	r = RET_PF_INVALID;
5063 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
5064 		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
5065 		if (r == RET_PF_EMULATE)
5066 			goto emulate;
5067 	}
5068 
5069 	if (r == RET_PF_INVALID) {
5070 		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
5071 					  lower_32_bits(error_code), false);
5072 		if (WARN_ON_ONCE(r == RET_PF_INVALID))
5073 			return -EIO;
5074 	}
5075 
5076 	if (r < 0)
5077 		return r;
5078 	if (r != RET_PF_EMULATE)
5079 		return 1;
5080 
5081 	/*
5082 	 * Before emulating the instruction, check if the error code
5083 	 * was due to a RO violation while translating the guest page.
5084 	 * This can occur when using nested virtualization with nested
5085 	 * paging in both guests. If true, we simply unprotect the page
5086 	 * and resume the guest.
5087 	 */
5088 	if (vcpu->arch.mmu->direct_map &&
5089 	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
5090 		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
5091 		return 1;
5092 	}
5093 
5094 	/*
5095 	 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
5096 	 * optimistically try to just unprotect the page and let the processor
5097 	 * re-execute the instruction that caused the page fault.  Do not allow
5098 	 * retrying MMIO emulation, as it's not only pointless but could also
5099 	 * cause us to enter an infinite loop because the processor will keep
5100 	 * faulting on the non-existent MMIO address.  Retrying an instruction
5101 	 * from a nested guest is also pointless and dangerous as we are only
5102 	 * explicitly shadowing L1's page tables, i.e. unprotecting something
5103 	 * for L1 isn't going to magically fix whatever issue caused L2 to fail.
5104 	 */
5105 	if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
5106 		emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
5107 emulate:
5108 	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
5109 				       insn_len);
5110 }
5111 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
5112 
5113 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
5114 			    gva_t gva, hpa_t root_hpa)
5115 {
5116 	int i;
5117 
5118 	/* It's actually a GPA for vcpu->arch.guest_mmu.  */
5119 	if (mmu != &vcpu->arch.guest_mmu) {
5120 		/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
5121 		if (is_noncanonical_address(gva, vcpu))
5122 			return;
5123 
5124 		kvm_x86_ops.tlb_flush_gva(vcpu, gva);
5125 	}
5126 
5127 	if (!mmu->invlpg)
5128 		return;
5129 
5130 	if (root_hpa == INVALID_PAGE) {
5131 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
5132 
5133 		/*
5134 		 * INVLPG is required to invalidate any global mappings for the VA,
5135 		 * irrespective of PCID.  Since determining whether any of the
5136 		 * prev_root mappings of the VA is marked global would take roughly
5137 		 * as much work as just syncing it blindly, we might as well
5138 		 * always sync it.
5139 		 *
5140 		 * Mappings not reachable via the current cr3 or the prev_roots will be
5141 		 * synced when switching to that cr3, so nothing needs to be done here
5142 		 * for them.
5143 		 */
5144 		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5145 			if (VALID_PAGE(mmu->prev_roots[i].hpa))
5146 				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5147 	} else {
5148 		mmu->invlpg(vcpu, gva, root_hpa);
5149 	}
5150 }
5151 EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_gva);
5152 
5153 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
5154 {
5155 	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
5156 	++vcpu->stat.invlpg;
5157 }
5158 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
5159 
5160 
5161 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
5162 {
5163 	struct kvm_mmu *mmu = vcpu->arch.mmu;
5164 	bool tlb_flush = false;
5165 	uint i;
5166 
5167 	if (pcid == kvm_get_active_pcid(vcpu)) {
5168 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
5169 		tlb_flush = true;
5170 	}
5171 
5172 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5173 		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
5174 		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
5175 			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
5176 			tlb_flush = true;
5177 		}
5178 	}
5179 
5180 	if (tlb_flush)
5181 		kvm_x86_ops.tlb_flush_gva(vcpu, gva);
5182 
5183 	++vcpu->stat.invlpg;
5184 
5185 	/*
5186 	 * Mappings not reachable via the current cr3 or the prev_roots will be
5187 	 * synced when switching to that cr3, so nothing needs to be done here
5188 	 * for them.
5189 	 */
5190 }
5191 EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva);
5192 
5193 void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
5194 		       int tdp_huge_page_level)
5195 {
5196 	tdp_enabled = enable_tdp;
5197 	max_tdp_level = tdp_max_root_level;
5198 
5199 	/*
5200 	 * max_huge_page_level reflects KVM's MMU capabilities irrespective
5201 	 * of kernel support, e.g. KVM may be capable of using 1GB pages when
5202 	 * the kernel is not.  But, KVM never creates a page size greater than
5203 	 * what is used by the kernel for any given HVA, i.e. the kernel's
5204 	 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
5205 	 */
5206 	if (tdp_enabled)
5207 		max_huge_page_level = tdp_huge_page_level;
5208 	else if (boot_cpu_has(X86_FEATURE_GBPAGES))
5209 		max_huge_page_level = PG_LEVEL_1G;
5210 	else
5211 		max_huge_page_level = PG_LEVEL_2M;
5212 }
5213 EXPORT_SYMBOL_GPL(kvm_configure_mmu);
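
/*
 * Illustrative call (a sketch; real vendor modules derive these values
 * from hardware capabilities): a TDP-capable implementation using
 * 4-level paging with 1GB huge page support could configure the MMU as
 *
 *   kvm_configure_mmu(true, PT64_ROOT_4LEVEL, PG_LEVEL_1G);
 *
 * whereas a shadow-paging setup passes enable_tdp == false, in which
 * case tdp_huge_page_level is ignored and max_huge_page_level falls
 * back to the host's GBPAGES support.
 */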
5214 
5215 /* The return value indicates if tlb flush on all vcpus is needed. */
5216 typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
5217 
5218 /* The caller should hold mmu_lock before calling this function. */
5219 static __always_inline bool
5220 slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
5221 			slot_level_handler fn, int start_level, int end_level,
5222 			gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
5223 {
5224 	struct slot_rmap_walk_iterator iterator;
5225 	bool flush = false;
5226 
5227 	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
5228 			end_gfn, &iterator) {
5229 		if (iterator.rmap)
5230 			flush |= fn(kvm, iterator.rmap);
5231 
5232 		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
5233 			if (flush && lock_flush_tlb) {
5234 				kvm_flush_remote_tlbs_with_address(kvm,
5235 						start_gfn,
5236 						iterator.gfn - start_gfn + 1);
5237 				flush = false;
5238 			}
5239 			cond_resched_lock(&kvm->mmu_lock);
5240 		}
5241 	}
5242 
5243 	if (flush && lock_flush_tlb) {
5244 		kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
5245 						   end_gfn - start_gfn + 1);
5246 		flush = false;
5247 	}
5248 
5249 	return flush;
5250 }
5251 
5252 static __always_inline bool
5253 slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
5254 		  slot_level_handler fn, int start_level, int end_level,
5255 		  bool lock_flush_tlb)
5256 {
5257 	return slot_handle_level_range(kvm, memslot, fn, start_level,
5258 			end_level, memslot->base_gfn,
5259 			memslot->base_gfn + memslot->npages - 1,
5260 			lock_flush_tlb);
5261 }
5262 
5263 static __always_inline bool
5264 slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
5265 		      slot_level_handler fn, bool lock_flush_tlb)
5266 {
5267 	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
5268 				 KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
5269 }
5270 
5271 static __always_inline bool
5272 slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
5273 			slot_level_handler fn, bool lock_flush_tlb)
5274 {
5275 	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K + 1,
5276 				 KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
5277 }
5278 
5279 static __always_inline bool
5280 slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
5281 		 slot_level_handler fn, bool lock_flush_tlb)
5282 {
5283 	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
5284 				 PG_LEVEL_4K, lock_flush_tlb);
5285 }
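
/*
 * Minimal usage sketch (hypothetical handler, kept under #if 0 so it is
 * never built): a slot_level_handler is invoked once per rmap bucket and
 * returns whether a TLB flush is needed; the slot_handle_*() wrappers
 * walk every bucket of the slot at the requested levels and take care of
 * lock-breaking and batched flushing.
 */
#if 0
static bool touch_rmaps_example(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	/* Inspect or modify the sptes hanging off rmap_head here. */
	return false;	/* no TLB flush required */
}

static void walk_slot_example(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	spin_lock(&kvm->mmu_lock);
	/* Visit all page sizes; flush remote TLBs under the lock as needed. */
	slot_handle_all_level(kvm, memslot, touch_rmaps_example, true);
	spin_unlock(&kvm->mmu_lock);
}
#endif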
5286 
5287 static void free_mmu_pages(struct kvm_mmu *mmu)
5288 {
5289 	free_page((unsigned long)mmu->pae_root);
5290 	free_page((unsigned long)mmu->lm_root);
5291 }
5292 
5293 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
5294 {
5295 	struct page *page;
5296 	int i;
5297 
5298 	mmu->root_hpa = INVALID_PAGE;
5299 	mmu->root_pgd = 0;
5300 	mmu->translate_gpa = translate_gpa;
5301 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
5302 		mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
5303 
5304 	/*
5305 	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
5306 	 * while the PDP table is a per-vCPU construct that's allocated at MMU
5307 	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
5308 	 * x86_64.  Therefore we need to allocate the PDP table in the first
5309 	 * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
5310 	 * generally doesn't use PAE paging and can skip allocating the PDP
5311 	 * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
5312 	 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
5313 	 * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
5314 	 */
5315 	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
5316 		return 0;
5317 
5318 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
5319 	if (!page)
5320 		return -ENOMEM;
5321 
5322 	mmu->pae_root = page_address(page);
5323 	for (i = 0; i < 4; ++i)
5324 		mmu->pae_root[i] = INVALID_PAGE;
5325 
5326 	return 0;
5327 }
5328 
5329 int kvm_mmu_create(struct kvm_vcpu *vcpu)
5330 {
5331 	int ret;
5332 
5333 	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
5334 	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
5335 
5336 	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
5337 	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
5338 
5339 	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
5340 
5341 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
5342 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
5343 
5344 	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
5345 
5346 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
5347 	if (ret)
5348 		return ret;
5349 
5350 	ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
5351 	if (ret)
5352 		goto fail_allocate_root;
5353 
5354 	return ret;
5355  fail_allocate_root:
5356 	free_mmu_pages(&vcpu->arch.guest_mmu);
5357 	return ret;
5358 }
5359 
5360 #define BATCH_ZAP_PAGES	10
5361 static void kvm_zap_obsolete_pages(struct kvm *kvm)
5362 {
5363 	struct kvm_mmu_page *sp, *node;
5364 	int nr_zapped, batch = 0;
5365 
5366 restart:
5367 	list_for_each_entry_safe_reverse(sp, node,
5368 	      &kvm->arch.active_mmu_pages, link) {
5369 		/*
5370 		 * No obsolete valid page exists before a newly created page
5371 		 * since active_mmu_pages is a FIFO list.
5372 		 */
5373 		if (!is_obsolete_sp(kvm, sp))
5374 			break;
5375 
5376 		/*
5377 		 * Invalid pages should never land back on the list of active
5378 		 * pages.  Skip the bogus page, otherwise we'll get stuck in an
5379 		 * infinite loop if the page gets put back on the list (again).
5380 		 */
5381 		if (WARN_ON(sp->role.invalid))
5382 			continue;
5383 
5384 		/*
5385 		 * No need to flush the TLB since we're only zapping shadow
5386 		 * pages with an obsolete generation number and all vCPUs have
5387 		 * loaded a new root, i.e. the shadow pages being zapped cannot
5388 		 * be in active use by the guest.
5389 		 */
5390 		if (batch >= BATCH_ZAP_PAGES &&
5391 		    cond_resched_lock(&kvm->mmu_lock)) {
5392 			batch = 0;
5393 			goto restart;
5394 		}
5395 
5396 		if (__kvm_mmu_prepare_zap_page(kvm, sp,
5397 				&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
5398 			batch += nr_zapped;
5399 			goto restart;
5400 		}
5401 	}
5402 
5403 	/*
5404 	 * Trigger a remote TLB flush before freeing the page tables to ensure
5405 	 * KVM is not in the middle of a lockless shadow page table walk, which
5406 	 * may reference the pages.
5407 	 */
5408 	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
5409 }
5410 
5411 /*
5412  * Fast-invalidate all shadow pages, using a lock-break technique to
5413  * zap the obsolete pages.
5414  *
5415  * This is required when a memslot is being deleted or the VM is being
5416  * destroyed; in those cases we must ensure that, after this function
5417  * returns, the KVM MMU does not use any resource of the slot being
5418  * deleted (or, for VM destruction, of any slot).
5419  */
5420 static void kvm_mmu_zap_all_fast(struct kvm *kvm)
5421 {
5422 	lockdep_assert_held(&kvm->slots_lock);
5423 
5424 	spin_lock(&kvm->mmu_lock);
5425 	trace_kvm_mmu_zap_all_fast(kvm);
5426 
5427 	/*
5428 	 * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
5429 	 * held for the entire duration of zapping obsolete pages, it's
5430 	 * impossible for there to be multiple invalid generations associated
5431 	 * with *valid* shadow pages at any given time, i.e. there is exactly
5432 	 * one valid generation and (at most) one invalid generation.
5433 	 */
5434 	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
5435 
5436 	/*
5437 	 * Notify all vcpus to reload their shadow page tables and flush
5438 	 * their TLBs.  All vcpus will then switch to a new shadow page
5439 	 * table with the new mmu_valid_gen.
5440 	 *
5441 	 * Note: this must be done under the protection of mmu_lock;
5442 	 * otherwise a vcpu could purge a shadow page but miss the TLB flush.
5443 	 */
5444 	kvm_reload_remote_mmus(kvm);
5445 
5446 	kvm_zap_obsolete_pages(kvm);
5447 
5448 	if (kvm->arch.tdp_mmu_enabled)
5449 		kvm_tdp_mmu_zap_all(kvm);
5450 
5451 	spin_unlock(&kvm->mmu_lock);
5452 }
5453 
5454 static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
5455 {
5456 	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
5457 }
5458 
5459 static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
5460 			struct kvm_memory_slot *slot,
5461 			struct kvm_page_track_notifier_node *node)
5462 {
5463 	kvm_mmu_zap_all_fast(kvm);
5464 }
5465 
5466 void kvm_mmu_init_vm(struct kvm *kvm)
5467 {
5468 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5469 
5470 	kvm_mmu_init_tdp_mmu(kvm);
5471 
5472 	node->track_write = kvm_mmu_pte_write;
5473 	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
5474 	kvm_page_track_register_notifier(kvm, node);
5475 }
5476 
5477 void kvm_mmu_uninit_vm(struct kvm *kvm)
5478 {
5479 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5480 
5481 	kvm_page_track_unregister_notifier(kvm, node);
5482 
5483 	kvm_mmu_uninit_tdp_mmu(kvm);
5484 }
5485 
5486 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
5487 {
5488 	struct kvm_memslots *slots;
5489 	struct kvm_memory_slot *memslot;
5490 	int i;
5491 	bool flush;
5492 
5493 	spin_lock(&kvm->mmu_lock);
5494 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5495 		slots = __kvm_memslots(kvm, i);
5496 		kvm_for_each_memslot(memslot, slots) {
5497 			gfn_t start, end;
5498 
5499 			start = max(gfn_start, memslot->base_gfn);
5500 			end = min(gfn_end, memslot->base_gfn + memslot->npages);
5501 			if (start >= end)
5502 				continue;
5503 
5504 			slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
5505 						PG_LEVEL_4K,
5506 						KVM_MAX_HUGEPAGE_LEVEL,
5507 						start, end - 1, true);
5508 		}
5509 	}
5510 
5511 	if (kvm->arch.tdp_mmu_enabled) {
5512 		flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end);
5513 		if (flush)
5514 			kvm_flush_remote_tlbs(kvm);
5515 	}
5516 
5517 	spin_unlock(&kvm->mmu_lock);
5518 }
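
/*
 * Worked example (hypothetical gfns): for a memslot with base_gfn 0x100
 * and npages 0x100, a request to zap [0x180, 0x400) is clamped to the
 * overlap [0x180, 0x200): start = max(0x180, 0x100) and end = min(0x400,
 * 0x100 + 0x100).  Memslots that do not intersect the range at all are
 * skipped by the start >= end check.
 */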
5519 
5520 static bool slot_rmap_write_protect(struct kvm *kvm,
5521 				    struct kvm_rmap_head *rmap_head)
5522 {
5523 	return __rmap_write_protect(kvm, rmap_head, false);
5524 }
5525 
5526 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
5527 				      struct kvm_memory_slot *memslot,
5528 				      int start_level)
5529 {
5530 	bool flush;
5531 
5532 	spin_lock(&kvm->mmu_lock);
5533 	flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
5534 				start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
5535 	if (kvm->arch.tdp_mmu_enabled)
5536 		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K);
5537 	spin_unlock(&kvm->mmu_lock);
5538 
5539 	/*
5540 	 * We can flush all the TLBs outside of mmu_lock without TLB
5541 	 * corruption, since we only change sptes from writable to
5542 	 * read-only; the only case we need to care about is a change of
5543 	 * an spte from present to present (a change from present to
5544 	 * nonpresent flushes all the TLBs immediately).  In other
5545 	 * words, the only case that matters is mmu_spte_update(), which
5546 	 * checks SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE
5547 	 * instead of PT_WRITABLE_MASK, meaning it no longer depends
5548 	 * on PT_WRITABLE_MASK.
5549 	 */
5550 	if (flush)
5551 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5552 }
5553 
5554 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
5555 					 struct kvm_rmap_head *rmap_head)
5556 {
5557 	u64 *sptep;
5558 	struct rmap_iterator iter;
5559 	int need_tlb_flush = 0;
5560 	kvm_pfn_t pfn;
5561 	struct kvm_mmu_page *sp;
5562 
5563 restart:
5564 	for_each_rmap_spte(rmap_head, &iter, sptep) {
5565 		sp = sptep_to_sp(sptep);
5566 		pfn = spte_to_pfn(*sptep);
5567 
5568 		/*
5569 		 * We cannot create huge page mappings for indirect shadow pages,
5570 		 * which are found on the last rmap (level = 1) when not using
5571 		 * tdp; such shadow pages are kept in sync with the guest page
5572 		 * table, and the guest page table uses 4K mappings if the
5573 		 * indirect sp has level = 1.
5574 		 */
5575 		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
5576 		    (kvm_is_zone_device_pfn(pfn) ||
5577 		     PageCompound(pfn_to_page(pfn)))) {
5578 			pte_list_remove(rmap_head, sptep);
5579 
5580 			if (kvm_available_flush_tlb_with_range())
5581 				kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
5582 					KVM_PAGES_PER_HPAGE(sp->role.level));
5583 			else
5584 				need_tlb_flush = 1;
5585 
5586 			goto restart;
5587 		}
5588 	}
5589 
5590 	return need_tlb_flush;
5591 }
5592 
5593 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
5594 				   const struct kvm_memory_slot *memslot)
5595 {
5596 	/* FIXME: const-ify all uses of struct kvm_memory_slot.  */
5597 	spin_lock(&kvm->mmu_lock);
5598 	slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
5599 			 kvm_mmu_zap_collapsible_spte, true);
5600 
5601 	if (kvm->arch.tdp_mmu_enabled)
5602 		kvm_tdp_mmu_zap_collapsible_sptes(kvm, memslot);
5603 	spin_unlock(&kvm->mmu_lock);
5604 }
5605 
5606 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
5607 					struct kvm_memory_slot *memslot)
5608 {
5609 	/*
5610 	 * All current use cases for flushing the TLBs for a specific memslot
5611 	 * are related to dirty logging, and do the TLB flush out of mmu_lock.
5612 	 * The interaction between the various operations on the memslot must
5613 	 * be serialized by slots_lock to ensure the TLB flush from one operation
5614 	 * is observed by any other operation on the same memslot.
5615 	 */
5616 	lockdep_assert_held(&kvm->slots_lock);
5617 	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
5618 					   memslot->npages);
5619 }
5620 
5621 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
5622 				   struct kvm_memory_slot *memslot)
5623 {
5624 	bool flush;
5625 
5626 	spin_lock(&kvm->mmu_lock);
5627 	flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
5628 	if (kvm->arch.tdp_mmu_enabled)
5629 		flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
5630 	spin_unlock(&kvm->mmu_lock);
5631 
5632 	/*
5633 	 * It's also safe to flush TLBs outside of mmu_lock here, as this
5634 	 * function is currently only used for dirty logging, in which case
5635 	 * flushing TLBs outside of mmu_lock also guarantees no dirty pages
5636 	 * will be lost in the dirty_bitmap.
5637 	 */
5638 	if (flush)
5639 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5640 }
5641 EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
5642 
5643 void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
5644 					struct kvm_memory_slot *memslot)
5645 {
5646 	bool flush;
5647 
5648 	spin_lock(&kvm->mmu_lock);
5649 	flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
5650 					false);
5651 	if (kvm->arch.tdp_mmu_enabled)
5652 		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_2M);
5653 	spin_unlock(&kvm->mmu_lock);
5654 
5655 	if (flush)
5656 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5657 }
5658 EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
5659 
5660 void kvm_mmu_slot_set_dirty(struct kvm *kvm,
5661 			    struct kvm_memory_slot *memslot)
5662 {
5663 	bool flush;
5664 
5665 	spin_lock(&kvm->mmu_lock);
5666 	flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
5667 	if (kvm->arch.tdp_mmu_enabled)
5668 		flush |= kvm_tdp_mmu_slot_set_dirty(kvm, memslot);
5669 	spin_unlock(&kvm->mmu_lock);
5670 
5671 	if (flush)
5672 		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5673 }
5674 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
5675 
5676 void kvm_mmu_zap_all(struct kvm *kvm)
5677 {
5678 	struct kvm_mmu_page *sp, *node;
5679 	LIST_HEAD(invalid_list);
5680 	int ign;
5681 
5682 	spin_lock(&kvm->mmu_lock);
5683 restart:
5684 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
5685 		if (WARN_ON(sp->role.invalid))
5686 			continue;
5687 		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
5688 			goto restart;
5689 		if (cond_resched_lock(&kvm->mmu_lock))
5690 			goto restart;
5691 	}
5692 
5693 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
5694 
5695 	if (kvm->arch.tdp_mmu_enabled)
5696 		kvm_tdp_mmu_zap_all(kvm);
5697 
5698 	spin_unlock(&kvm->mmu_lock);
5699 }
5700 
5701 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
5702 {
5703 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
5704 
5705 	gen &= MMIO_SPTE_GEN_MASK;
5706 
5707 	/*
5708 	 * Generation numbers are incremented in multiples of the number of
5709 	 * address spaces in order to provide unique generations across all
5710 	 * address spaces.  Strip what is effectively the address space
5711 	 * modifier prior to checking for a wrap of the MMIO generation so
5712 	 * that a wrap in any address space is detected.
5713 	 */
5714 	gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
5715 
5716 	/*
5717 	 * The very rare case: if the MMIO generation number has wrapped,
5718 	 * zap all shadow pages.
5719 	 */
5720 	if (unlikely(gen == 0)) {
5721 		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
5722 		kvm_mmu_zap_all_fast(kvm);
5723 	}
5724 }
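
/*
 * Worked example (assuming KVM_ADDRESS_SPACE_NUM == 2, as on x86 with
 * SMM): memslot generations advance in steps of two so that the two
 * address spaces never share a value.  Stripping the low bit above means
 * a generation that has wrapped to either 0 or 1 is seen here as
 * gen == 0, so a wrap in either address space triggers the zap.
 */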
5725 
5726 static unsigned long
5727 mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5728 {
5729 	struct kvm *kvm;
5730 	int nr_to_scan = sc->nr_to_scan;
5731 	unsigned long freed = 0;
5732 
5733 	mutex_lock(&kvm_lock);
5734 
5735 	list_for_each_entry(kvm, &vm_list, vm_list) {
5736 		int idx;
5737 		LIST_HEAD(invalid_list);
5738 
5739 		/*
5740 		 * Never scan more than sc->nr_to_scan VM instances.
5741 		 * In practice this condition is never hit, since we do not try
5742 		 * to shrink more than one VM and it is very unlikely to see
5743 		 * !n_used_mmu_pages so many times.
5744 		 */
5745 		if (!nr_to_scan--)
5746 			break;
5747 		/*
5748 		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
5749 		 * here. We may skip a VM instance erroneously, but we do not
5750 		 * want to shrink a VM that has only just started to populate
5751 		 * its MMU anyway.
5752 		 */
5753 		if (!kvm->arch.n_used_mmu_pages &&
5754 		    !kvm_has_zapped_obsolete_pages(kvm))
5755 			continue;
5756 
5757 		idx = srcu_read_lock(&kvm->srcu);
5758 		spin_lock(&kvm->mmu_lock);
5759 
5760 		if (kvm_has_zapped_obsolete_pages(kvm)) {
5761 			kvm_mmu_commit_zap_page(kvm,
5762 			      &kvm->arch.zapped_obsolete_pages);
5763 			goto unlock;
5764 		}
5765 
5766 		freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);
5767 
5768 unlock:
5769 		spin_unlock(&kvm->mmu_lock);
5770 		srcu_read_unlock(&kvm->srcu, idx);
5771 
5772 		/*
5773 		 * unfair on small ones
5774 		 * per-vm shrinkers cry out
5775 		 * sadness comes quickly
5776 		 */
5777 		list_move_tail(&kvm->vm_list, &vm_list);
5778 		break;
5779 	}
5780 
5781 	mutex_unlock(&kvm_lock);
5782 	return freed;
5783 }
5784 
5785 static unsigned long
5786 mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
5787 {
5788 	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
5789 }
5790 
5791 static struct shrinker mmu_shrinker = {
5792 	.count_objects = mmu_shrink_count,
5793 	.scan_objects = mmu_shrink_scan,
5794 	.seeks = DEFAULT_SEEKS * 10,
5795 };
5796 
5797 static void mmu_destroy_caches(void)
5798 {
5799 	kmem_cache_destroy(pte_list_desc_cache);
5800 	kmem_cache_destroy(mmu_page_header_cache);
5801 }
5802 
5803 static void kvm_set_mmio_spte_mask(void)
5804 {
5805 	u64 mask;
5806 
5807 	/*
5808 	 * Set a reserved PA bit in MMIO SPTEs to generate page faults with
5809 	 * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
5810 	 * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
5811 	 * 52-bit physical addresses then there are no reserved PA bits in the
5812 	 * PTEs and so the reserved PA approach must be disabled.
5813 	 */
5814 	if (shadow_phys_bits < 52)
5815 		mask = BIT_ULL(51) | PT_PRESENT_MASK;
5816 	else
5817 		mask = 0;
5818 
5819 	kvm_mmu_set_mmio_spte_mask(mask, ACC_WRITE_MASK | ACC_USER_MASK);
5820 }
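
/*
 * Worked example (hypothetical CPU): with shadow_phys_bits == 46,
 * physical address bits 46..51 are reserved, so an MMIO SPTE of
 * BIT_ULL(51) | PT_PRESENT_MASK is guaranteed to fault with PFEC.RSVD=1.
 * A CPU reporting the full 52 bits has no reserved PA bits, hence the
 * reserved-PA approach is disabled and the value stays 0.
 */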
5821 
5822 static bool get_nx_auto_mode(void)
5823 {
5824 	/* Return true when CPU has the bug, and mitigations are ON */
5825 	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
5826 }
5827 
5828 static void __set_nx_huge_pages(bool val)
5829 {
5830 	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
5831 }
5832 
5833 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
5834 {
5835 	bool old_val = nx_huge_pages;
5836 	bool new_val;
5837 
5838 	/* In "auto" mode deploy workaround only if CPU has the bug. */
5839 	if (sysfs_streq(val, "off"))
5840 		new_val = 0;
5841 	else if (sysfs_streq(val, "force"))
5842 		new_val = 1;
5843 	else if (sysfs_streq(val, "auto"))
5844 		new_val = get_nx_auto_mode();
5845 	else if (strtobool(val, &new_val) < 0)
5846 		return -EINVAL;
5847 
5848 	__set_nx_huge_pages(new_val);
5849 
5850 	if (new_val != old_val) {
5851 		struct kvm *kvm;
5852 
5853 		mutex_lock(&kvm_lock);
5854 
5855 		list_for_each_entry(kvm, &vm_list, vm_list) {
5856 			mutex_lock(&kvm->slots_lock);
5857 			kvm_mmu_zap_all_fast(kvm);
5858 			mutex_unlock(&kvm->slots_lock);
5859 
5860 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
5861 		}
5862 		mutex_unlock(&kvm_lock);
5863 	}
5864 
5865 	return 0;
5866 }
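
/*
 * Usage note: nx_huge_pages is a runtime-writable module parameter, so
 * switching policy amounts to writing "off", "force" or "auto" to
 * /sys/module/kvm/parameters/nx_huge_pages.  Any change of the effective
 * value zaps all shadow pages of every VM (above) so that existing
 * mappings are recreated under the new policy, and kicks each VM's
 * recovery thread.
 */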
5867 
5868 int kvm_mmu_module_init(void)
5869 {
5870 	int ret = -ENOMEM;
5871 
5872 	if (nx_huge_pages == -1)
5873 		__set_nx_huge_pages(get_nx_auto_mode());
5874 
5875 	/*
5876 	 * MMU roles use union aliasing, which is, generally speaking,
5877 	 * undefined behavior. However, we supposedly know how compilers behave
5878 	 * and the current status quo is unlikely to change. The guards below
5879 	 * are supposed to let us know if that assumption becomes false.
5880 	 */
5881 	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
5882 	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
5883 	BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));
5884 
5885 	kvm_mmu_reset_all_pte_masks();
5886 
5887 	kvm_set_mmio_spte_mask();
5888 
5889 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
5890 					    sizeof(struct pte_list_desc),
5891 					    0, SLAB_ACCOUNT, NULL);
5892 	if (!pte_list_desc_cache)
5893 		goto out;
5894 
5895 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
5896 						  sizeof(struct kvm_mmu_page),
5897 						  0, SLAB_ACCOUNT, NULL);
5898 	if (!mmu_page_header_cache)
5899 		goto out;
5900 
5901 	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
5902 		goto out;
5903 
5904 	ret = register_shrinker(&mmu_shrinker);
5905 	if (ret)
5906 		goto out;
5907 
5908 	return 0;
5909 
5910 out:
5911 	mmu_destroy_caches();
5912 	return ret;
5913 }
5914 
5915 /*
5916  * Calculate mmu pages needed for kvm.
5917  */
5918 unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
5919 {
5920 	unsigned long nr_mmu_pages;
5921 	unsigned long nr_pages = 0;
5922 	struct kvm_memslots *slots;
5923 	struct kvm_memory_slot *memslot;
5924 	int i;
5925 
5926 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5927 		slots = __kvm_memslots(kvm, i);
5928 
5929 		kvm_for_each_memslot(memslot, slots)
5930 			nr_pages += memslot->npages;
5931 	}
5932 
5933 	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
5934 	nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
5935 
5936 	return nr_mmu_pages;
5937 }
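
/*
 * Worked example (hypothetical memslot sizes, assuming
 * KVM_PERMILLE_MMU_PAGES == 20): a VM with 4GiB of memory has
 * nr_pages == 1048576, giving 1048576 * 20 / 1000 == 20971 shadow pages,
 * comfortably above the KVM_MIN_ALLOC_MMU_PAGES floor that protects very
 * small VMs.
 */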
5938 
5939 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
5940 {
5941 	kvm_mmu_unload(vcpu);
5942 	free_mmu_pages(&vcpu->arch.root_mmu);
5943 	free_mmu_pages(&vcpu->arch.guest_mmu);
5944 	mmu_free_memory_caches(vcpu);
5945 }
5946 
5947 void kvm_mmu_module_exit(void)
5948 {
5949 	mmu_destroy_caches();
5950 	percpu_counter_destroy(&kvm_total_used_mmu_pages);
5951 	unregister_shrinker(&mmu_shrinker);
5952 	mmu_audit_disable();
5953 }
5954 
5955 static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
5956 {
5957 	unsigned int old_val;
5958 	int err;
5959 
5960 	old_val = nx_huge_pages_recovery_ratio;
5961 	err = param_set_uint(val, kp);
5962 	if (err)
5963 		return err;
5964 
5965 	if (READ_ONCE(nx_huge_pages) &&
5966 	    !old_val && nx_huge_pages_recovery_ratio) {
5967 		struct kvm *kvm;
5968 
5969 		mutex_lock(&kvm_lock);
5970 
5971 		list_for_each_entry(kvm, &vm_list, vm_list)
5972 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
5973 
5974 		mutex_unlock(&kvm_lock);
5975 	}
5976 
5977 	return err;
5978 }
5979 
5980 static void kvm_recover_nx_lpages(struct kvm *kvm)
5981 {
5982 	int rcu_idx;
5983 	struct kvm_mmu_page *sp;
5984 	unsigned int ratio;
5985 	LIST_HEAD(invalid_list);
5986 	bool flush = false;
5987 	ulong to_zap;
5988 
5989 	rcu_idx = srcu_read_lock(&kvm->srcu);
5990 	spin_lock(&kvm->mmu_lock);
5991 
5992 	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
5993 	to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
5994 	for ( ; to_zap; --to_zap) {
5995 		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
5996 			break;
5997 
5998 		/*
5999 		 * We use a separate list instead of just using active_mmu_pages
6000 		 * because the number of lpage_disallowed pages is expected to
6001 		 * be relatively small compared to the total.
6002 		 */
6003 		sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
6004 				      struct kvm_mmu_page,
6005 				      lpage_disallowed_link);
6006 		WARN_ON_ONCE(!sp->lpage_disallowed);
6007 		if (sp->tdp_mmu_page) {
6008 			flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
6009 		} else {
6010 			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
6011 			WARN_ON_ONCE(sp->lpage_disallowed);
6012 		}
6013 
6014 		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
6015 			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
6016 			cond_resched_lock(&kvm->mmu_lock);
6017 			flush = false;
6018 		}
6019 	}
6020 	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
6021 
6022 	spin_unlock(&kvm->mmu_lock);
6023 	srcu_read_unlock(&kvm->srcu, rcu_idx);
6024 }
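
/*
 * Worked example (hypothetical counts): with the default recovery ratio
 * of 60 and kvm->stat.nx_lpage_splits == 600, each wakeup of the recovery
 * thread zaps DIV_ROUND_UP(600, 60) == 10 of the oldest shadow pages that
 * were forced to disallow huge pages, allowing those ranges to be mapped
 * with huge pages again if they are no longer executed.
 */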
6025 
6026 static long get_nx_lpage_recovery_timeout(u64 start_time)
6027 {
6028 	return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
6029 		? start_time + 60 * HZ - get_jiffies_64()
6030 		: MAX_SCHEDULE_TIMEOUT;
6031 }
6032 
6033 static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
6034 {
6035 	u64 start_time;
6036 	long remaining_time;
6037 
6038 	while (true) {
6039 		start_time = get_jiffies_64();
6040 		remaining_time = get_nx_lpage_recovery_timeout(start_time);
6041 
6042 		set_current_state(TASK_INTERRUPTIBLE);
6043 		while (!kthread_should_stop() && remaining_time > 0) {
6044 			schedule_timeout(remaining_time);
6045 			remaining_time = get_nx_lpage_recovery_timeout(start_time);
6046 			set_current_state(TASK_INTERRUPTIBLE);
6047 		}
6048 
6049 		set_current_state(TASK_RUNNING);
6050 
6051 		if (kthread_should_stop())
6052 			return 0;
6053 
6054 		kvm_recover_nx_lpages(kvm);
6055 	}
6056 }
6057 
6058 int kvm_mmu_post_init_vm(struct kvm *kvm)
6059 {
6060 	int err;
6061 
6062 	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
6063 					  "kvm-nx-lpage-recovery",
6064 					  &kvm->arch.nx_lpage_recovery_thread);
6065 	if (!err)
6066 		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
6067 
6068 	return err;
6069 }
6070 
6071 void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
6072 {
6073 	if (kvm->arch.nx_lpage_recovery_thread)
6074 		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
6075 }
6076