
Searched refs:spte (Results 1 – 13 of 13) sorted by relevance

/kernel/linux/linux-5.10/arch/x86/kvm/mmu/
spte.c
89 u64 spte = 0; in make_spte() local
93 spte |= SPTE_AD_DISABLED_MASK; in make_spte()
95 spte |= SPTE_AD_WRPROT_ONLY_MASK; in make_spte()
103 spte |= shadow_present_mask; in make_spte()
105 spte |= spte_shadow_accessed_mask(spte); in make_spte()
113 spte |= shadow_x_mask; in make_spte()
115 spte |= shadow_nx_mask; in make_spte()
118 spte |= shadow_user_mask; in make_spte()
121 spte |= PT_PAGE_SIZE_MASK; in make_spte()
123 spte |= kvm_x86_ops.get_mt_mask(vcpu, gfn, in make_spte()
[all …]
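The hits above show the core of make_spte(): start from zero and OR in one mask per property the new mapping needs. A minimal standalone sketch of that bit-composition pattern, with made-up DEMO_* mask values (the kernel computes the real masks at runtime from hardware capabilities):

    #include <stdint.h>
    #include <stdbool.h>

    /* Hypothetical mask values; KVM derives the real ones from CPU
     * features (EPT vs. legacy shadow paging, NX support, etc.). */
    #define DEMO_PRESENT_MASK  (1ull << 0)
    #define DEMO_WRITABLE_MASK (1ull << 1)
    #define DEMO_NX_MASK       (1ull << 63)

    /* Compose an spte the way make_spte() does: accumulate flags,
     * then merge in the physical frame number. */
    static uint64_t demo_make_spte(uint64_t pfn, bool writable, bool exec)
    {
        uint64_t spte = DEMO_PRESENT_MASK;

        if (writable)
            spte |= DEMO_WRITABLE_MASK;
        if (!exec)
            spte |= DEMO_NX_MASK;
        return spte | (pfn << 12);
    }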
spte.h
143 static inline bool is_mmio_spte(u64 spte) in is_mmio_spte() argument
145 return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK; in is_mmio_spte()
153 static inline bool spte_ad_enabled(u64 spte) in spte_ad_enabled() argument
155 MMU_WARN_ON(is_mmio_spte(spte)); in spte_ad_enabled()
156 return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK; in spte_ad_enabled()
159 static inline bool spte_ad_need_write_protect(u64 spte) in spte_ad_need_write_protect() argument
161 MMU_WARN_ON(is_mmio_spte(spte)); in spte_ad_need_write_protect()
162 return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK; in spte_ad_need_write_protect()
165 static inline u64 spte_shadow_accessed_mask(u64 spte) in spte_shadow_accessed_mask() argument
167 MMU_WARN_ON(is_mmio_spte(spte)); in spte_shadow_accessed_mask()
[all …]
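Every helper in the spte.h hits follows the same shape: isolate a field with a mask, then compare it against a tag value. A sketch of that pattern with placeholder constants (the real field layout is version-specific and not reproduced here):

    #include <stdint.h>
    #include <stdbool.h>

    /* Placeholder two-bit "special" field and its tag values. */
    #define DEMO_SPECIAL_MASK (3ull << 52)
    #define DEMO_MMIO_TAG     (3ull << 52)
    #define DEMO_AD_DISABLED  (1ull << 52)

    /* Same shape as is_mmio_spte(): field == tag. */
    static bool demo_is_mmio_spte(uint64_t spte)
    {
        return (spte & DEMO_SPECIAL_MASK) == DEMO_MMIO_TAG;
    }

    /* Same shape as spte_ad_enabled(): A/D bits are usable unless
     * the field carries the "AD disabled" tag. */
    static bool demo_spte_ad_enabled(uint64_t spte)
    {
        return (spte & DEMO_SPECIAL_MASK) != DEMO_AD_DISABLED;
    }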
mmutrace.h
205 TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
206 TP_ARGS(sptep, gfn, spte),
218 __entry->access = spte & ACC_ALL;
219 __entry->gen = get_mmio_spte_generation(spte);
305 TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
306 TP_ARGS(spte, kvm_gen, spte_gen),
311 __field(u64, spte)
317 __entry->spte = spte;
320 TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,
333 __field(u64, spte)
[all …]
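The trace points above record two things per MMIO spte: the low access bits and the generation number; the "valid %d" in TP_printk is just a comparison of the cached generation against the VM's current one. A hypothetical distillation:

    #include <stdint.h>
    #include <stdbool.h>

    #define DEMO_ACC_ALL 0x7u   /* placeholder for ACC_ALL (r/w/x) */

    static unsigned demo_mmio_access(uint64_t spte)
    {
        return spte & DEMO_ACC_ALL;   /* what __entry->access stores */
    }

    /* A cached MMIO spte is trusted only while its generation still
     * matches the memslot generation. */
    static bool demo_mmio_gen_valid(unsigned kvm_gen, unsigned spte_gen)
    {
        return kvm_gen == spte_gen;
    }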
mmu.c
166 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \ argument
169 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
170 __shadow_walk_next(&(_walker), spte))
176 static void mmu_spte_set(u64 *sptep, u64 spte);
226 static gfn_t get_mmio_spte_gfn(u64 spte) in get_mmio_spte_gfn() argument
228 u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask; in get_mmio_spte_gfn()
230 gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN) in get_mmio_spte_gfn()
236 static unsigned get_mmio_spte_access(u64 spte) in get_mmio_spte_access() argument
238 return spte & shadow_mmio_access_mask; in get_mmio_spte_access()
252 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte) in check_mmio_spte() argument
[all …]
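get_mmio_spte_gfn() above reassembles a gfn that was deliberately split across two bit ranges so the spte can keep a reserved-bit region in the middle. A hedged round-trip sketch with placeholder shift and mask values (the kernel's shadow_nonpresent_or_rsvd_* masks are computed at boot):

    #include <stdint.h>

    #define DEMO_RSVD_LEN  5                   /* width of the hole */
    #define DEMO_LOW_MASK  ((1ull << 40) - 1)  /* gfn bits kept in place */

    /* Encode: keep the low gfn bits, shift the high bits up past the
     * reserved region. */
    static uint64_t demo_encode_mmio_gfn(uint64_t gfn)
    {
        return (gfn & DEMO_LOW_MASK) |
               ((gfn & ~DEMO_LOW_MASK) << DEMO_RSVD_LEN);
    }

    /* Decode, mirroring the shape of the get_mmio_spte_gfn() hits. */
    static uint64_t demo_decode_mmio_gfn(uint64_t spte)
    {
        uint64_t gfn = spte & DEMO_LOW_MASK;
        gfn |= (spte >> DEMO_RSVD_LEN) & ~DEMO_LOW_MASK;
        return gfn;
    }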
tdp_iter.c
51 u64 *spte_to_child_pt(u64 spte, int level) in spte_to_child_pt() argument
57 if (!is_shadow_present_pte(spte) || is_last_spte(spte, level)) in spte_to_child_pt()
60 return __va(spte_to_pfn(spte) << PAGE_SHIFT); in spte_to_child_pt()
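spte_to_child_pt() is the descent step of the TDP iterator: a present, non-leaf spte's address field names the physical frame of the next-level page table, which __va() turns into a usable pointer. A user-space sketch with a fake direct map (DEMO_PAGE_OFFSET and DEMO_PFN_MASK are stand-ins, not the kernel's values):

    #include <stdint.h>
    #include <stddef.h>

    #define DEMO_PAGE_SHIFT  12
    #define DEMO_PFN_MASK    0x000ffffffffff000ull /* address field */
    #define DEMO_PAGE_OFFSET 0xffff888000000000ull /* fake __va() base */

    static uint64_t *demo_spte_to_child_pt(uint64_t spte, int present,
                                           int last_level)
    {
        /* Leaf or non-present sptes have no child table to walk. */
        if (!present || last_level)
            return NULL;

        uint64_t pfn = (spte & DEMO_PFN_MASK) >> DEMO_PAGE_SHIFT;
        return (uint64_t *)(DEMO_PAGE_OFFSET + (pfn << DEMO_PAGE_SHIFT));
    }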
paging_tmpl.h
187 struct kvm_mmu_page *sp, u64 *spte, in FNAME()
204 drop_spte(vcpu->kvm, spte); in FNAME()
529 u64 *spte, pt_element_t gpte, bool no_dirty_log) in FNAME()
535 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte)) in FNAME()
538 pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte); in FNAME()
552 mmu_set_spte(vcpu, spte, pte_access, false, PG_LEVEL_4K, gfn, pfn, in FNAME()
560 u64 *spte, const void *pte) in FNAME()
564 FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false); in FNAME()
595 u64 *spte; in FNAME() local
607 spte = sp->spt + i; in FNAME()
[all …]
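The FNAME() wrappers in these hits are token pasting: paging_tmpl.h is included several times with a different prefix each time, so one template produces the paging32_*, paging64_*, and ept_* walker variants. A minimal illustration of the idiom (demo names only):

    /* One template, many instantiations: redefine the prefix and
     * re-include to stamp out another copy. */
    #define DEMO_FNAME(name) demo_paging64_##name

    static int DEMO_FNAME(walk_addr)(unsigned long addr)
    {
        /* expands to demo_paging64_walk_addr() */
        return addr != 0;
    }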
mmu_internal.h
139 void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
/kernel/linux/linux-5.10/Documentation/virt/kvm/
locking.rst
39 the spte.
42 SPTE_MMU_WRITEABLE bit on the spte:
49 On fast page fault path, we will use cmpxchg to atomically set the spte W
50 bit if spte.SPTE_HOST_WRITEABLE = 1 and spte.SPTE_WRITE_PROTECT = 1, or
67 | spte is the shadow page table entry corresponding with gpte and |
68 | spte = pfn1 |
76 | old_spte = *spte; | |
80 | | spte = 0; |
87 | | spte = pfn1; |
91 | if (cmpxchg(spte, old_spte, old_spte+W) |
[all …]
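The fast-page-fault path quoted above repairs the spte without taking mmu-lock: re-read the entry, then publish the writable version only if no other CPU changed it in between, retrying the fault otherwise. A C11 sketch of that cmpxchg step (DEMO_W_BIT is a placeholder, not x86's actual W bit position):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define DEMO_W_BIT (1ull << 1)   /* placeholder writable bit */

    /* Returns true if this CPU won the race and set the W bit;
     * false means the spte changed underneath us and the fault
     * should be retried. */
    static bool demo_fast_pf_fix(_Atomic uint64_t *sptep)
    {
        uint64_t old = atomic_load(sptep);
        return atomic_compare_exchange_strong(sptep, &old,
                                              old | DEMO_W_BIT);
    }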
mmu.rst
55 spte shadow pte (referring to pfns)
125 A nonleaf spte allows the hardware mmu to reach the leaf pages and
128 A leaf spte corresponds to either one or two translations encoded into
232 parent_ptes bit 0 is zero, only one spte points at this page and
233 parent_ptes points at this single spte, otherwise, there exists multiple
250 Only present on 32-bit hosts, where a 64-bit spte cannot be written
315 - check for valid generation number in the spte (see "Fast invalidation of
334 - walk the shadow page table to find the spte for the translation,
337 - If this is an mmio request, cache the mmio info to the spte and set some
338 reserved bit on the spte (see callers of kvm_mmu_set_mmio_spte_mask)
[all …]
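The parent_ptes field described in the mmu.rst hits is a tagged pointer: bit 0 clear means the value is the address of the single parent spte, bit 0 set means it points to a descriptor chaining several of them. A sketch of the encoding (struct and names hypothetical):

    #include <stdint.h>
    #include <stddef.h>

    struct demo_pte_list_desc;   /* would chain multiple parent sptes */

    static int demo_has_many_parents(uintptr_t parent_ptes)
    {
        return parent_ptes & 1;  /* bit 0 is the tag */
    }

    static uint64_t *demo_single_parent(uintptr_t parent_ptes)
    {
        return demo_has_many_parents(parent_ptes)
             ? NULL              /* caller must walk the descriptor list */
             : (uint64_t *)parent_ptes;
    }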
/kernel/linux/linux-5.10/arch/x86/kvm/
Makefile
19 mmu/spte.o mmu/tdp_iter.o mmu/tdp_mmu.o
/kernel/linux/linux-5.10/arch/s390/mm/
pgtable.c
652 pte_t spte, tpte; in ptep_shadow_pte() local
658 spte = *sptep; in ptep_shadow_pte()
659 if (!(pte_val(spte) & _PAGE_INVALID) && in ptep_shadow_pte()
660 !((pte_val(spte) & _PAGE_PROTECT) && in ptep_shadow_pte()
664 pte_val(tpte) = (pte_val(spte) & PAGE_MASK) | in ptep_shadow_pte()
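ptep_shadow_pte() above only shadows a source pte that is valid, then builds the target from the source's page frame plus the guest-visible protection bits. A loose sketch of that derive-if-valid shape (flag positions are placeholders, not s390's layout):

    #include <stdint.h>

    #define DEMO_PAGE_MASK   (~0xfffull)
    #define DEMO_INVALID_BIT (1ull << 9)    /* placeholder _PAGE_INVALID */

    static int demo_shadow_pte(uint64_t spte, uint64_t prot_bits,
                               uint64_t *tpte)
    {
        if (spte & DEMO_INVALID_BIT)
            return -1;                      /* nothing to shadow */

        /* Frame from the source pte, flags from the guest view. */
        *tpte = (spte & DEMO_PAGE_MASK) | (prot_bits & ~DEMO_PAGE_MASK);
        return 0;
    }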
/kernel/linux/linux-5.10/mm/
hugetlb.c
5421 pte_t *spte = NULL; in huge_pmd_share() local
5435 spte = huge_pte_offset(svma->vm_mm, saddr, in huge_pmd_share()
5437 if (spte) { in huge_pmd_share()
5438 get_page(virt_to_page(spte)); in huge_pmd_share()
5444 if (!spte) in huge_pmd_share()
5447 ptl = huge_pte_lock(hstate_vma(vma), mm, spte); in huge_pmd_share()
5450 (pmd_t *)((unsigned long)spte & PAGE_MASK)); in huge_pmd_share()
5453 put_page(virt_to_page(spte)); in huge_pmd_share()
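huge_pmd_share() above reuses a neighbouring mapping's page-table page instead of allocating a new one, with get_page()/put_page() keeping the sharer count. A user-space sketch of that share-or-allocate pattern (all names hypothetical):

    #include <stdint.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct demo_pt_page {
        _Atomic int refcount;       /* stands in for the page refcount */
        uint64_t entries[512];
    };

    /* Reuse an existing table if a sibling mapping already has one,
     * otherwise allocate a fresh page with one reference. */
    static struct demo_pt_page *demo_pt_share(struct demo_pt_page *existing)
    {
        if (existing) {
            atomic_fetch_add(&existing->refcount, 1);   /* get_page() */
            return existing;
        }
        struct demo_pt_page *pt = calloc(1, sizeof(*pt));
        if (pt)
            atomic_store(&pt->refcount, 1);
        return pt;
    }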
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
448 bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES; in nvkm_vmm_ref_hwpt() local
451 if (spte != next) in nvkm_vmm_ref_hwpt()
455 if (!spte) { in nvkm_vmm_ref_hwpt()