Lines matching refs:mmu_lock
Each entry below gives the line number in the kernel source, the matching statement, and the enclosing function; together they cover the acquisition, release, and contention-check sites of kvm->mmu_lock in the x86 MMU code.
2000 if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) { in mmu_sync_children()
2002 cond_resched_lock(&vcpu->kvm->mmu_lock); in mmu_sync_children()
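
The two mmu_sync_children() hits above show KVM's lock-break idiom: while holding mmu_lock, periodically test need_resched() and spin_needbreak(), and if either fires, publish pending work and let cond_resched_lock() drop, reschedule, and retake the lock. A minimal sketch of that idiom follows; process_one_batch() is a hypothetical placeholder, and the flush helper shown is illustrative (the real code flushes through its own internal helper):

#include <linux/kvm_host.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static void sync_loop(struct kvm *kvm)
{
	bool done = false;

	spin_lock(&kvm->mmu_lock);
	while (!done) {
		done = process_one_batch(kvm);	/* hypothetical worker */

		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
			/* Publish pending changes before the lock can drop. */
			kvm_flush_remote_tlbs(kvm);
			/* Drops mmu_lock, reschedules, then retakes it. */
			cond_resched_lock(&kvm->mmu_lock);
		}
	}
	spin_unlock(&kvm->mmu_lock);
}

This is what keeps long walks under the spinlock from starving other vCPUs that fault concurrently.
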
2454 spin_lock(&kvm->mmu_lock); in kvm_mmu_change_mmu_pages()
2465 spin_unlock(&kvm->mmu_lock); in kvm_mmu_change_mmu_pages()
2476 spin_lock(&kvm->mmu_lock); in kvm_mmu_unprotect_page()
2484 spin_unlock(&kvm->mmu_lock); in kvm_mmu_unprotect_page()
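
kvm_mmu_change_mmu_pages() and kvm_mmu_unprotect_page() show the simplest usage: a plain critical section around a mutation of shadow-page state. Schematically, with update_shadow_state() as a hypothetical stand-in for the real bodies:

#include <linux/kvm_host.h>
#include <linux/spinlock.h>

static void mmu_write_side(struct kvm *kvm)
{
	spin_lock(&kvm->mmu_lock);	/* excludes vCPU faults and zap paths */
	update_shadow_state(kvm);	/* hypothetical mutation of shadow PTEs */
	spin_unlock(&kvm->mmu_lock);
}
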
3176 spin_lock(&kvm->mmu_lock); in kvm_mmu_free_roots()
3199 spin_unlock(&kvm->mmu_lock); in kvm_mmu_free_roots()
3220 spin_lock(&vcpu->kvm->mmu_lock); in mmu_alloc_root()
3223 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_root()
3229 spin_unlock(&vcpu->kvm->mmu_lock); in mmu_alloc_root()
3412 spin_lock(&vcpu->kvm->mmu_lock); in kvm_mmu_sync_roots()
3418 spin_unlock(&vcpu->kvm->mmu_lock); in kvm_mmu_sync_roots()
3422 spin_lock(&vcpu->kvm->mmu_lock); in kvm_mmu_sync_roots()
3436 spin_unlock(&vcpu->kvm->mmu_lock); in kvm_mmu_sync_roots()
3723 spin_lock(&vcpu->kvm->mmu_lock); in direct_page_fault()
3738 spin_unlock(&vcpu->kvm->mmu_lock); in direct_page_fault()
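
direct_page_fault() takes mmu_lock on the slow path after resolving the pfn outside the lock; in kernels of this vintage the locked region begins with an mmu-notifier recheck. The sketch below captures that shape only: map_pfn_into_shadow() is hypothetical and error handling is trimmed.

#include <linux/kvm_host.h>
#include <linux/spinlock.h>

static int fault_slow_path(struct kvm_vcpu *vcpu, gpa_t gpa,
			   unsigned long mmu_seq, kvm_pfn_t pfn)
{
	int r = RET_PF_RETRY;	/* KVM-internal "retry this fault" code */

	spin_lock(&vcpu->kvm->mmu_lock);
	/* The pfn was resolved without the lock held; if an mmu-notifier
	 * invalidation ran in the meantime, bail out and retry. */
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;
	r = map_pfn_into_shadow(vcpu, gpa, pfn);	/* hypothetical */
out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	return r;
}
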
5002 spin_lock(&vcpu->kvm->mmu_lock); in kvm_mmu_pte_write()
5034 spin_unlock(&vcpu->kvm->mmu_lock); in kvm_mmu_pte_write()
5232 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { in slot_handle_level_range()
5239 cond_resched_lock(&kvm->mmu_lock); in slot_handle_level_range()
5391 cond_resched_lock(&kvm->mmu_lock)) { in kvm_zap_obsolete_pages()
5424 spin_lock(&kvm->mmu_lock); in kvm_mmu_zap_all_fast()
5451 spin_unlock(&kvm->mmu_lock); in kvm_mmu_zap_all_fast()
5493 spin_lock(&kvm->mmu_lock); in kvm_zap_gfn_range()
5517 spin_unlock(&kvm->mmu_lock); in kvm_zap_gfn_range()
5532 spin_lock(&kvm->mmu_lock); in kvm_mmu_slot_remove_write_access()
5537 spin_unlock(&kvm->mmu_lock); in kvm_mmu_slot_remove_write_access()
5597 spin_lock(&kvm->mmu_lock); in kvm_mmu_zap_collapsible_sptes()
5603 spin_unlock(&kvm->mmu_lock); in kvm_mmu_zap_collapsible_sptes()
5626 spin_lock(&kvm->mmu_lock); in kvm_mmu_slot_leaf_clear_dirty()
5630 spin_unlock(&kvm->mmu_lock); in kvm_mmu_slot_leaf_clear_dirty()
5648 spin_lock(&kvm->mmu_lock); in kvm_mmu_slot_largepage_remove_write_access()
5653 spin_unlock(&kvm->mmu_lock); in kvm_mmu_slot_largepage_remove_write_access()
5665 spin_lock(&kvm->mmu_lock); in kvm_mmu_slot_set_dirty()
5669 spin_unlock(&kvm->mmu_lock); in kvm_mmu_slot_set_dirty()
5682 spin_lock(&kvm->mmu_lock); in kvm_mmu_zap_all()
5689 if (cond_resched_lock(&kvm->mmu_lock)) in kvm_mmu_zap_all()
5698 spin_unlock(&kvm->mmu_lock); in kvm_mmu_zap_all()
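
kvm_mmu_zap_all() combines the critical section with a lock break inside a list walk: cond_resched_lock() returns nonzero when it actually dropped and retook mmu_lock, and since the page list may have changed in that window, the walk restarts from the head. A sketch assuming KVM's mmu-internal definitions, with zap_one() standing in for the real prepare/commit zap helpers:

#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static void zap_everything(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *tmp;

	spin_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, tmp, &kvm->arch.active_mmu_pages, link) {
		if (zap_one(kvm, sp))	/* hypothetical zap helper */
			goto restart;	/* zapping modified the list */
		if (cond_resched_lock(&kvm->mmu_lock))
			goto restart;	/* lock was dropped; walk is stale */
	}
	spin_unlock(&kvm->mmu_lock);
}

The same restart-or-break structure shows up in kvm_zap_obsolete_pages(), slot_handle_level_range(), and kvm_recover_nx_lpages() below.
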
5758 spin_lock(&kvm->mmu_lock); in mmu_shrink_scan()
5769 spin_unlock(&kvm->mmu_lock); in mmu_shrink_scan()
5990 spin_lock(&kvm->mmu_lock); in kvm_recover_nx_lpages()
6014 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { in kvm_recover_nx_lpages()
6016 cond_resched_lock(&kvm->mmu_lock); in kvm_recover_nx_lpages()
6022 spin_unlock(&kvm->mmu_lock); in kvm_recover_nx_lpages()