Lines Matching refs:spte
207 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \ argument
210 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
211 __shadow_walk_next(&(_walker), spte))
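
for_each_shadow_entry_lockless() above drives a top-down walk of the shadow page tables: each step reads one entry via mmu_spte_get_lockless() and __shadow_walk_next() descends into the table that entry points at, stopping at a hole or a leaf. A minimal standalone sketch of that walk shape, assuming a toy two-level table and invented PRESENT/ADDR_MASK encodings (not KVM's), is:

    #include <stdint.h>
    #include <stdio.h>

    #define ENTRIES   512
    #define PRESENT   (1ull << 0)        /* hypothetical "present" flag bit        */
    #define ADDR_MASK (~0xfffull)        /* hypothetical: next table in bits 12+   */

    static _Alignas(4096) uint64_t l2[ENTRIES];  /* toy level-2 (root) table */
    static _Alignas(4096) uint64_t l1[ENTRIES];  /* toy level-1 table        */

    /* Read the entry at each level and descend until a non-present entry. */
    static void walk(uint64_t *root, int idx_hi, int idx_lo)
    {
        uint64_t *table = root;
        int idx[2] = { idx_hi, idx_lo };

        for (int level = 2; level >= 1; level--) {
            uint64_t entry = table[idx[2 - level]];

            printf("level %d: entry %#llx\n", level, (unsigned long long)entry);
            if (!(entry & PRESENT))
                return;                  /* hole: the walk stops early */
            table = (uint64_t *)(uintptr_t)(entry & ADDR_MASK);
        }
    }

    int main(void)
    {
        l2[3] = (uint64_t)(uintptr_t)l1 | PRESENT;  /* root entry -> l1 */
        l1[7] = 0xabc000ull | PRESENT;              /* leaf mapping     */
        walk(l2, 3, 7);
        return 0;
    }
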
267 static void mmu_spte_set(u64 *sptep, u64 spte);
269 static bool is_executable_pte(u64 spte);
288 static inline bool spte_ad_enabled(u64 spte) in spte_ad_enabled() argument
290 MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value); in spte_ad_enabled()
291 return !(spte & shadow_acc_track_value); in spte_ad_enabled()
299 static inline u64 spte_shadow_accessed_mask(u64 spte) in spte_shadow_accessed_mask() argument
301 MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value); in spte_shadow_accessed_mask()
302 return spte_ad_enabled(spte) ? shadow_accessed_mask : 0; in spte_shadow_accessed_mask()
305 static inline u64 spte_shadow_dirty_mask(u64 spte) in spte_shadow_dirty_mask() argument
307 MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value); in spte_shadow_dirty_mask()
308 return spte_ad_enabled(spte) ? shadow_dirty_mask : 0; in spte_shadow_dirty_mask()
311 static inline bool is_access_track_spte(u64 spte) in is_access_track_spte() argument
313 return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0; in is_access_track_spte()
344 static unsigned int get_mmio_spte_generation(u64 spte) in get_mmio_spte_generation() argument
348 spte &= ~shadow_mmio_mask; in get_mmio_spte_generation()
350 gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK; in get_mmio_spte_generation()
351 gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT; in get_mmio_spte_generation()
377 static bool is_mmio_spte(u64 spte) in is_mmio_spte() argument
379 return (spte & shadow_mmio_mask) == shadow_mmio_value; in is_mmio_spte()
382 static gfn_t get_mmio_spte_gfn(u64 spte) in get_mmio_spte_gfn() argument
384 u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask; in get_mmio_spte_gfn()
386 gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len) in get_mmio_spte_gfn()
392 static unsigned get_mmio_spte_access(u64 spte) in get_mmio_spte_access() argument
395 return (spte & ~mask) & ~PAGE_MASK; in get_mmio_spte_access()
409 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte) in check_mmio_spte() argument
414 spte_gen = get_mmio_spte_generation(spte); in check_mmio_spte()
416 trace_check_mmio_spte(spte, kvm_gen, spte_gen); in check_mmio_spte()
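
get_mmio_spte_generation() above reassembles the MMIO generation counter that was split across a low and a high range of spare bits in a non-present SPTE, and check_mmio_spte() compares it against the current generation. A minimal standalone sketch of that split encoding, using made-up GEN_* shifts and widths rather than the kernel's MMIO_SPTE_GEN_* values, is:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GEN_LOW_SHIFT   3    /* hypothetical: low gen bits start at bit 3   */
    #define GEN_LOW_BITS    9    /* hypothetical: 9 low bits of the generation  */
    #define GEN_HIGH_SHIFT  52   /* hypothetical: high gen bits start at bit 52 */
    #define GEN_LOW_MASK    ((1ull << GEN_LOW_BITS) - 1)

    static uint64_t pack_generation(uint64_t spte, uint64_t gen)
    {
        spte |= (gen & GEN_LOW_MASK) << GEN_LOW_SHIFT;    /* low part  */
        spte |= (gen >> GEN_LOW_BITS) << GEN_HIGH_SHIFT;  /* high part */
        return spte;
    }

    static uint64_t unpack_generation(uint64_t spte)
    {
        uint64_t gen;

        gen  = (spte >> GEN_LOW_SHIFT) & GEN_LOW_MASK;    /* low part  */
        gen |= (spte >> GEN_HIGH_SHIFT) << GEN_LOW_BITS;  /* high part */
        return gen;
    }

    int main(void)
    {
        uint64_t spte = pack_generation(0, 0x5a5);

        assert(unpack_generation(spte) == 0x5a5);         /* round-trips */
        printf("spte=%#llx gen=%#llx\n",
               (unsigned long long)spte,
               (unsigned long long)unpack_generation(spte));
        return 0;
    }
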
506 static bool is_executable_pte(u64 spte) in is_executable_pte() argument
508 return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask; in is_executable_pte()
524 static void __set_spte(u64 *sptep, u64 spte) in __set_spte() argument
526 WRITE_ONCE(*sptep, spte); in __set_spte()
529 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast() argument
531 WRITE_ONCE(*sptep, spte); in __update_clear_spte_fast()
534 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow() argument
536 return xchg(sptep, spte); in __update_clear_spte_slow()
549 u64 spte; member
552 static void count_spte_clear(u64 *sptep, u64 spte) in count_spte_clear() argument
556 if (is_shadow_present_pte(spte)) in count_spte_clear()
564 static void __set_spte(u64 *sptep, u64 spte) in __set_spte() argument
569 sspte = (union split_spte)spte; in __set_spte()
583 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast() argument
588 sspte = (union split_spte)spte; in __update_clear_spte_fast()
599 count_spte_clear(sptep, spte); in __update_clear_spte_fast()
602 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow() argument
607 sspte = (union split_spte)spte; in __update_clear_spte_slow()
613 count_spte_clear(sptep, spte); in __update_clear_spte_slow()
615 return orig.spte; in __update_clear_spte_slow()
639 union split_spte spte, *orig = (union split_spte *)sptep; in __get_spte_lockless() local
646 spte.spte_low = orig->spte_low; in __get_spte_lockless()
649 spte.spte_high = orig->spte_high; in __get_spte_lockless()
652 if (unlikely(spte.spte_low != orig->spte_low || in __get_spte_lockless()
656 return spte.spte; in __get_spte_lockless()
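
On 32-bit hosts, union split_spte above stores a 64-bit SPTE as two 32-bit halves, and __get_spte_lockless() reads them without a lock by re-checking the low half (plus a clear_spte_count check not shown here). A standalone sketch of that retry shape, omitting the memory barriers and counter the real code relies on, is:

    #include <stdint.h>
    #include <stdio.h>

    union split_spte {
        struct {
            uint32_t spte_low;
            uint32_t spte_high;
        };
        uint64_t spte;
    };

    /* Read *sptep without a lock: low half, then high half, then re-check low. */
    static uint64_t get_spte_lockless(volatile union split_spte *sptep)
    {
        union split_spte spte;

        do {
            spte.spte_low  = sptep->spte_low;
            /* a real implementation needs a read barrier here ...      */
            spte.spte_high = sptep->spte_high;
            /* ... and another one before the re-check below            */
        } while (spte.spte_low != sptep->spte_low);

        return spte.spte;
    }

    int main(void)
    {
        union split_spte e = { .spte = 0x8000000012345007ull };

        printf("spte = %#llx\n", (unsigned long long)get_spte_lockless(&e));
        return 0;
    }
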
660 static bool spte_can_locklessly_be_made_writable(u64 spte) in spte_can_locklessly_be_made_writable() argument
662 return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) == in spte_can_locklessly_be_made_writable()
666 static bool spte_has_volatile_bits(u64 spte) in spte_has_volatile_bits() argument
668 if (!is_shadow_present_pte(spte)) in spte_has_volatile_bits()
677 if (spte_can_locklessly_be_made_writable(spte) || in spte_has_volatile_bits()
678 is_access_track_spte(spte)) in spte_has_volatile_bits()
681 if (spte_ad_enabled(spte)) { in spte_has_volatile_bits()
682 if ((spte & shadow_accessed_mask) == 0 || in spte_has_volatile_bits()
683 (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0)) in spte_has_volatile_bits()
690 static bool is_accessed_spte(u64 spte) in is_accessed_spte() argument
692 u64 accessed_mask = spte_shadow_accessed_mask(spte); in is_accessed_spte()
694 return accessed_mask ? spte & accessed_mask in is_accessed_spte()
695 : !is_access_track_spte(spte); in is_accessed_spte()
698 static bool is_dirty_spte(u64 spte) in is_dirty_spte() argument
700 u64 dirty_mask = spte_shadow_dirty_mask(spte); in is_dirty_spte()
702 return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK; in is_dirty_spte()
840 static u64 mark_spte_for_access_track(u64 spte) in mark_spte_for_access_track() argument
842 if (spte_ad_enabled(spte)) in mark_spte_for_access_track()
843 return spte & ~shadow_accessed_mask; in mark_spte_for_access_track()
845 if (is_access_track_spte(spte)) in mark_spte_for_access_track()
846 return spte; in mark_spte_for_access_track()
853 WARN_ONCE((spte & PT_WRITABLE_MASK) && in mark_spte_for_access_track()
854 !spte_can_locklessly_be_made_writable(spte), in mark_spte_for_access_track()
857 WARN_ONCE(spte & (shadow_acc_track_saved_bits_mask << in mark_spte_for_access_track()
861 spte |= (spte & shadow_acc_track_saved_bits_mask) << in mark_spte_for_access_track()
863 spte &= ~shadow_acc_track_mask; in mark_spte_for_access_track()
865 return spte; in mark_spte_for_access_track()
869 static u64 restore_acc_track_spte(u64 spte) in restore_acc_track_spte() argument
871 u64 new_spte = spte; in restore_acc_track_spte()
872 u64 saved_bits = (spte >> shadow_acc_track_saved_bits_shift) in restore_acc_track_spte()
875 WARN_ON_ONCE(spte_ad_enabled(spte)); in restore_acc_track_spte()
876 WARN_ON_ONCE(!is_access_track_spte(spte)); in restore_acc_track_spte()
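
mark_spte_for_access_track() above parks the permission bits that make a PTE present into a spare high-bit range so the next guest access faults, and restore_acc_track_spte() moves them back. A standalone sketch of that transform, with hypothetical stand-ins for the shadow_acc_track_* masks and shift, is:

    #include <assert.h>
    #include <stdint.h>

    #define ACC_TRACK_MASK        0x7ull   /* hypothetical: R/W/X low bits      */
    #define ACC_TRACK_SAVED_SHIFT 54       /* hypothetical: spare high bit range */

    static uint64_t mark_for_access_track(uint64_t spte)
    {
        /* park the low permission bits in the spare high range ...            */
        spte |= (spte & ACC_TRACK_MASK) << ACC_TRACK_SAVED_SHIFT;
        /* ... and clear them so the next guest access takes a fault           */
        spte &= ~ACC_TRACK_MASK;
        return spte;
    }

    static uint64_t restore_access_track(uint64_t spte)
    {
        uint64_t saved = (spte >> ACC_TRACK_SAVED_SHIFT) & ACC_TRACK_MASK;

        /* drop the parked copy and reinstate the original permission bits     */
        spte &= ~(ACC_TRACK_MASK << ACC_TRACK_SAVED_SHIFT);
        return spte | saved;
    }

    int main(void)
    {
        uint64_t spte = 0x0000000012345000ull | 0x7;   /* frame + R/W/X set */

        assert(restore_access_track(mark_for_access_track(spte)) == spte);
        return 0;
    }
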
889 u64 spte = mmu_spte_get_lockless(sptep); in mmu_spte_age() local
891 if (!is_accessed_spte(spte)) in mmu_spte_age()
894 if (spte_ad_enabled(spte)) { in mmu_spte_age()
902 if (is_writable_pte(spte)) in mmu_spte_age()
903 kvm_set_pfn_dirty(spte_to_pfn(spte)); in mmu_spte_age()
905 spte = mark_spte_for_access_track(spte); in mmu_spte_age()
906 mmu_spte_update_no_track(sptep, spte); in mmu_spte_age()
1248 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, in pte_list_add() argument
1255 rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte); in pte_list_add()
1256 rmap_head->val = (unsigned long)spte; in pte_list_add()
1258 rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte); in pte_list_add()
1261 desc->sptes[1] = spte; in pte_list_add()
1265 rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte); in pte_list_add()
1277 desc->sptes[i] = spte; in pte_list_add()
1305 static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) in pte_list_remove() argument
1312 printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte); in pte_list_remove()
1315 rmap_printk("pte_list_remove: %p 1->0\n", spte); in pte_list_remove()
1316 if ((u64 *)rmap_head->val != spte) { in pte_list_remove()
1317 printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte); in pte_list_remove()
1322 rmap_printk("pte_list_remove: %p many->many\n", spte); in pte_list_remove()
1327 if (desc->sptes[i] == spte) { in pte_list_remove()
1336 pr_err("pte_list_remove: %p many->many\n", spte); in pte_list_remove()
1369 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_add() argument
1374 sp = page_header(__pa(spte)); in rmap_add()
1375 kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); in rmap_add()
1377 return pte_list_add(vcpu, spte, rmap_head); in rmap_add()
1380 static void rmap_remove(struct kvm *kvm, u64 *spte) in rmap_remove() argument
1386 sp = page_header(__pa(spte)); in rmap_remove()
1387 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); in rmap_remove()
1389 pte_list_remove(spte, rmap_head); in rmap_remove()
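
pte_list_add()/pte_list_remove() above maintain the rmap head as a tagged value: 0 when empty, a raw spte pointer for a single entry, and a descriptor pointer with bit 0 set once there are many. A standalone sketch of the add path, assuming a fixed 3-slot descriptor and no chaining (the kernel chains descriptors and uses unsigned long for the head), is:

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define RMAP_DESC_SPTES 3   /* hypothetical slot count */

    struct rmap_desc {
        uint64_t *sptes[RMAP_DESC_SPTES];
    };

    struct rmap_head {
        uintptr_t val;          /* the kernel uses unsigned long here */
    };

    static void rmap_list_add(struct rmap_head *head, uint64_t *spte)
    {
        struct rmap_desc *desc;

        if (!head->val) {                        /* 0 -> 1: store the pointer directly */
            head->val = (uintptr_t)spte;
        } else if (!(head->val & 1)) {           /* 1 -> many: switch to a descriptor  */
            desc = calloc(1, sizeof(*desc));
            if (!desc)
                abort();
            desc->sptes[0] = (uint64_t *)head->val;
            desc->sptes[1] = spte;
            head->val = (uintptr_t)desc | 1;     /* bit 0 tags "descriptor"            */
        } else {                                 /* many -> many: fill a free slot     */
            desc = (struct rmap_desc *)(head->val & ~(uintptr_t)1);
            for (int i = 0; i < RMAP_DESC_SPTES; i++) {
                if (!desc->sptes[i]) {
                    desc->sptes[i] = spte;
                    return;
                }
            }
            abort();                             /* sketch only: no descriptor chaining */
        }
    }

    int main(void)
    {
        struct rmap_head head = { 0 };
        uint64_t a = 0, b = 0;

        rmap_list_add(&head, &a);
        assert(head.val == (uintptr_t)&a);       /* single entry: raw pointer          */
        rmap_list_add(&head, &b);
        assert(head.val & 1);                    /* many entries: tagged descriptor    */
        return 0;
    }
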
1509 u64 spte = *sptep; in spte_write_protect() local
1511 if (!is_writable_pte(spte) && in spte_write_protect()
1512 !(pt_protect && spte_can_locklessly_be_made_writable(spte))) in spte_write_protect()
1518 spte &= ~SPTE_MMU_WRITEABLE; in spte_write_protect()
1519 spte = spte & ~PT_WRITABLE_MASK; in spte_write_protect()
1521 return mmu_spte_update(sptep, spte); in spte_write_protect()
1540 u64 spte = *sptep; in spte_clear_dirty() local
1544 spte &= ~shadow_dirty_mask; in spte_clear_dirty()
1546 return mmu_spte_update(sptep, spte); in spte_clear_dirty()
1582 u64 spte = *sptep; in spte_set_dirty() local
1586 spte |= shadow_dirty_mask; in spte_set_dirty()
1588 return mmu_spte_update(sptep, spte); in spte_set_dirty()
1954 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_recycle() argument
1959 sp = page_header(__pa(spte)); in rmap_recycle()
2063 static void mark_unsync(u64 *spte);
2074 static void mark_unsync(u64 *spte) in mark_unsync() argument
2079 sp = page_header(__pa(spte)); in mark_unsync()
2080 index = spte - sp->spt; in mark_unsync()
2099 struct kvm_mmu_page *sp, u64 *spte, in nonpaging_update_pte() argument
2402 static void clear_sp_write_flooding_count(u64 *spte) in clear_sp_write_flooding_count() argument
2404 struct kvm_mmu_page *sp = page_header(__pa(spte)); in clear_sp_write_flooding_count()
2533 u64 spte) in __shadow_walk_next() argument
2535 if (is_last_spte(spte, iterator->level)) { in __shadow_walk_next()
2540 iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK; in __shadow_walk_next()
2552 u64 spte; in link_shadow_page() local
2556 spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK | in link_shadow_page()
2560 spte |= shadow_acc_track_value; in link_shadow_page()
2562 spte |= shadow_accessed_mask; in link_shadow_page()
2564 mmu_spte_set(sptep, spte); in link_shadow_page()
2595 u64 *spte) in mmu_page_zap_pte() argument
2600 pte = *spte; in mmu_page_zap_pte()
2603 drop_spte(kvm, spte); in mmu_page_zap_pte()
2608 drop_parent_pte(child, spte); in mmu_page_zap_pte()
2614 mmu_spte_clear_no_track(spte); in mmu_page_zap_pte()
2829 u64 spte = 0; in set_spte() local
2838 spte |= shadow_acc_track_value; in set_spte()
2846 spte |= shadow_present_mask; in set_spte()
2848 spte |= spte_shadow_accessed_mask(spte); in set_spte()
2856 spte |= shadow_x_mask; in set_spte()
2858 spte |= shadow_nx_mask; in set_spte()
2861 spte |= shadow_user_mask; in set_spte()
2864 spte |= PT_PAGE_SIZE_MASK; in set_spte()
2866 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, in set_spte()
2870 spte |= SPTE_HOST_WRITEABLE; in set_spte()
2875 spte |= shadow_me_mask; in set_spte()
2877 spte |= (u64)pfn << PAGE_SHIFT; in set_spte()
2891 spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE; in set_spte()
2907 spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE); in set_spte()
2913 spte |= spte_shadow_dirty_mask(spte); in set_spte()
2917 spte = mark_spte_for_access_track(spte); in set_spte()
2920 if (mmu_spte_update(sptep, spte)) in set_spte()
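
set_spte() above composes a leaf SPTE by OR-ing permission and attribute masks with the page frame address, then installs it with mmu_spte_update(). A standalone sketch of that shape, using invented bit positions rather than the kernel's shadow_*_mask values, is:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define SPTE_PRESENT  (1ull << 0)    /* hypothetical */
    #define SPTE_WRITABLE (1ull << 1)    /* hypothetical */
    #define SPTE_USER     (1ull << 2)    /* hypothetical */
    #define SPTE_NX       (1ull << 63)   /* hypothetical */

    static uint64_t make_leaf_spte(uint64_t pfn, bool writable, bool user, bool exec)
    {
        uint64_t spte = SPTE_PRESENT;

        if (writable)
            spte |= SPTE_WRITABLE;
        if (user)
            spte |= SPTE_USER;
        if (!exec)
            spte |= SPTE_NX;

        return spte | (pfn << PAGE_SHIFT);   /* frame address in the middle bits */
    }

    int main(void)
    {
        printf("%#llx\n",
               (unsigned long long)make_leaf_spte(0x1234, true, true, false));
        return 0;
    }
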
3028 u64 *spte, *start = NULL; in __direct_pte_prefetch() local
3034 spte = sp->spt + i; in __direct_pte_prefetch()
3036 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { in __direct_pte_prefetch()
3037 if (is_shadow_present_pte(*spte) || spte == sptep) { in __direct_pte_prefetch()
3040 if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) in __direct_pte_prefetch()
3044 start = spte; in __direct_pte_prefetch()
3072 u64 spte = *it.sptep; in disallowed_hugepage_adjust() local
3076 is_shadow_present_pte(spte) && in disallowed_hugepage_adjust()
3077 !is_large_pte(spte)) { in disallowed_hugepage_adjust()
3292 static bool is_access_allowed(u32 fault_err_code, u64 spte) in is_access_allowed() argument
3295 return is_executable_pte(spte); in is_access_allowed()
3298 return is_writable_pte(spte); in is_access_allowed()
3301 return spte & PT_PRESENT_MASK; in is_access_allowed()
3315 u64 spte = 0ull; in fast_page_fault() local
3329 for_each_shadow_entry_lockless(vcpu, gva, iterator, spte) in fast_page_fault()
3330 if (!is_shadow_present_pte(spte) || in fast_page_fault()
3335 if (!is_last_spte(spte, sp->role.level)) in fast_page_fault()
3348 if (is_access_allowed(error_code, spte)) { in fast_page_fault()
3353 new_spte = spte; in fast_page_fault()
3355 if (is_access_track_spte(spte)) in fast_page_fault()
3364 spte_can_locklessly_be_made_writable(spte)) in fast_page_fault()
3384 if (new_spte == spte || in fast_page_fault()
3394 iterator.sptep, spte, in fast_page_fault()
3408 spte, fault_handled); in fast_page_fault()
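
fast_page_fault() above tries to fix certain faults without taking the MMU lock: it recomputes the SPTE and installs it only if the entry has not changed underneath, otherwise it reports failure so the caller can retry or fall back to the slow path. A standalone sketch of that compare-and-exchange shape (not the real fault-classification logic) is:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SPTE_WRITABLE (1ull << 1)   /* hypothetical stand-in bit */

    static bool fast_fix_spte(_Atomic uint64_t *sptep)
    {
        uint64_t old_spte = atomic_load(sptep);
        uint64_t new_spte = old_spte | SPTE_WRITABLE;   /* e.g. grant write access */

        if (new_spte == old_spte)
            return true;                /* someone else already fixed it */

        /*
         * Install the new SPTE only if it is still the value the fix was
         * based on; a concurrent change makes the exchange fail and the
         * caller must retry or take the slow path under the MMU lock.
         */
        return atomic_compare_exchange_strong(sptep, &old_spte, new_spte);
    }

    int main(void)
    {
        _Atomic uint64_t spte = 0x12345005ull;

        return fast_fix_spte(&spte) ? 0 : 1;
    }
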
3750 static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level) in is_shadow_zero_bits_set() argument
3752 return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level); in is_shadow_zero_bits_set()
3775 u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull; in walk_shadow_page_get_mmio_spte() local
3787 __shadow_walk_next(&iterator, spte)) { in walk_shadow_page_get_mmio_spte()
3788 spte = mmu_spte_get_lockless(iterator.sptep); in walk_shadow_page_get_mmio_spte()
3790 sptes[leaf - 1] = spte; in walk_shadow_page_get_mmio_spte()
3793 if (!is_shadow_present_pte(spte)) in walk_shadow_page_get_mmio_spte()
3796 reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte, in walk_shadow_page_get_mmio_spte()
3812 *sptep = spte; in walk_shadow_page_get_mmio_spte()
3818 u64 spte; in handle_mmio_page_fault() local
3824 reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte); in handle_mmio_page_fault()
3828 if (is_mmio_spte(spte)) { in handle_mmio_page_fault()
3829 gfn_t gfn = get_mmio_spte_gfn(spte); in handle_mmio_page_fault()
3830 unsigned access = get_mmio_spte_access(spte); in handle_mmio_page_fault()
3832 if (!check_mmio_spte(vcpu, spte)) in handle_mmio_page_fault()
3874 u64 spte; in shadow_page_table_clear_flood() local
3880 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { in shadow_page_table_clear_flood()
3882 if (!is_shadow_present_pte(spte)) in shadow_page_table_clear_flood()
4802 struct kvm_mmu_page *sp, u64 *spte, in mmu_pte_write_new_pte() argument
4811 vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); in mmu_pte_write_new_pte()
4901 u64 *spte; in get_written_sptes() local
4925 spte = &sp->spt[page_offset / sizeof(*spte)]; in get_written_sptes()
4926 return spte; in get_written_sptes()
4936 u64 entry, gentry, *spte; in kvm_mmu_pte_write() local
4982 spte = get_written_sptes(sp, gpa, &npte); in kvm_mmu_pte_write()
4983 if (!spte) in kvm_mmu_pte_write()
4988 entry = *spte; in kvm_mmu_pte_write()
4989 mmu_page_zap_pte(vcpu->kvm, sp, spte); in kvm_mmu_pte_write()
4993 mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); in kvm_mmu_pte_write()
4994 if (need_remote_flush(entry, *spte)) in kvm_mmu_pte_write()
4996 ++spte; in kvm_mmu_pte_write()