Searched refs:VALID_PAGE (Results 1 – 7 of 7) sorted by relevance
54 #define VALID_PAGE(page) (((page) - mem_map) < max_mapnr) macro
74 #define VALID_PAGE(page) (((page) - mem_map) < max_mapnr) macro
62 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_spte_walk()
76 if (root && VALID_PAGE(root)) { in mmu_spte_walk()
2643 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_free_roots()
2708 ASSERT(!VALID_PAGE(root)); in mmu_alloc_direct_roots()
2746 ASSERT(!VALID_PAGE(root)); in mmu_alloc_shadow_roots()
2771 ASSERT(!VALID_PAGE(root)); in mmu_alloc_shadow_roots()
2839 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_sync_roots()
2854 if (root && VALID_PAGE(root)) { in mmu_sync_roots()
2991 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa)); in nonpaging_page_fault()
3060 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa)); in tdp_page_fault()
3352 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); in kvm_init_shadow_mmu()
3435 if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) in destroy_kvm_mmu()
[all …]
2869 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in vmx_flush_tlb()
172 # define VALID_PAGE(page) ((page - mem_map) < max_mapnr) macro
64 #define VALID_PAGE(x) ((x) != INVALID_PAGE) macro
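
The hits above show two unrelated macros sharing the VALID_PAGE name. The first form (lines 54/74/172 above) is the flat-memory-model check: a struct page pointer is considered valid when its index into the global mem_map array is below max_mapnr. The following user-space sketch only illustrates that bounds check; struct page, mem_map and max_mapnr here are simplified stand-ins, not the real kernel objects.

/* Sketch of the mem_map-based VALID_PAGE: valid iff the pointer's
 * index into the flat mem_map array is below max_mapnr. */
#include <stdio.h>
#include <stdlib.h>

struct page { unsigned long flags; };

static struct page *mem_map;     /* flat array: one struct page per page frame */
static unsigned long max_mapnr;  /* number of entries in mem_map */

#define VALID_PAGE(page) (((page) - mem_map) < max_mapnr)

int main(void)
{
        max_mapnr = 4;
        mem_map = calloc(max_mapnr, sizeof(*mem_map));
        if (!mem_map)
                return 1;

        struct page *inside  = &mem_map[2];          /* within the array */
        struct page *outside = &mem_map[max_mapnr];  /* one past the end */

        printf("inside:  %d\n", VALID_PAGE(inside));   /* prints 1 */
        printf("outside: %d\n", VALID_PAGE(outside));  /* prints 0 */

        free(mem_map);
        return 0;
}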
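
The second form (line 64 above) is the KVM definition: a value such as root_hpa is valid simply when it is not the INVALID_PAGE sentinel. The sketch below mirrors the early-return shape of the mmu.c and vmx.c hits above (mmu_spte_walk(), mmu_free_roots(), vmx_flush_tlb()); hpa_t, INVALID_PAGE and the structures here are simplified assumptions for illustration, not the real KVM types.

/* Sketch of the sentinel-based VALID_PAGE used by KVM: valid iff the
 * value is not the INVALID_PAGE marker. */
#include <stdio.h>

typedef unsigned long long hpa_t;

#define INVALID_PAGE   ((hpa_t)-1)
#define VALID_PAGE(x)  ((x) != INVALID_PAGE)

struct mmu  { hpa_t root_hpa; };
struct vcpu { struct mmu mmu; };

static void walk_shadow_roots(struct vcpu *vcpu)
{
        /* Same guard shape as the mmu_spte_walk() hit above: bail out
         * when no shadow root has been loaded yet. */
        if (!VALID_PAGE(vcpu->mmu.root_hpa)) {
                printf("no shadow root loaded, nothing to walk\n");
                return;
        }
        printf("walking shadow page table at root_hpa=0x%llx\n",
               vcpu->mmu.root_hpa);
}

int main(void)
{
        struct vcpu vcpu = { .mmu = { .root_hpa = INVALID_PAGE } };

        walk_shadow_roots(&vcpu);    /* root not valid yet */
        vcpu.mmu.root_hpa = 0x1000;  /* pretend a root page was allocated */
        walk_shadow_roots(&vcpu);    /* now valid */
        return 0;
}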