Lines matching refs: flush (a sketch of the shared flush-accumulation pattern follows the listing)
501 bool flush = false; in mmu_spte_update() local
514 flush = true; in mmu_spte_update()
522 flush = true; in mmu_spte_update()
527 flush = true; in mmu_spte_update()
531 return flush; in mmu_spte_update()
1129 bool flush = false; in __rmap_write_protect() local
1132 flush |= spte_write_protect(sptep, pt_protect); in __rmap_write_protect()
1134 return flush; in __rmap_write_protect()
1168 bool flush = false; in __rmap_clear_dirty() local
1172 flush |= spte_wrprot_for_clear_dirty(sptep); in __rmap_clear_dirty()
1174 flush |= spte_clear_dirty(sptep); in __rmap_clear_dirty()
1176 return flush; in __rmap_clear_dirty()
1199 bool flush = false; in __rmap_set_dirty() local
1203 flush |= spte_set_dirty(sptep); in __rmap_set_dirty()
1205 return flush; in __rmap_set_dirty()
1319 bool flush = false; in kvm_zap_rmapp() local
1325 flush = true; in kvm_zap_rmapp()
1328 return flush; in kvm_zap_rmapp()
1983 bool flush = false; in mmu_sync_children() local
1993 flush = false; in mmu_sync_children()
1997 flush |= kvm_sync_page(vcpu, sp, &invalid_list); in mmu_sync_children()
2001 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); in mmu_sync_children()
2003 flush = false; in mmu_sync_children()
2007 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); in mmu_sync_children()
2033 bool flush = false; in kvm_mmu_get_page() local
2104 flush |= kvm_sync_pages(vcpu, gfn, &invalid_list); in kvm_mmu_get_page()
2108 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); in kvm_mmu_get_page()
2596 bool flush = false; in mmu_set_spte() local
2612 flush = true; in mmu_set_spte()
2617 flush = true; in mmu_set_spte()
2630 if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush) in mmu_set_spte()
5240 bool flush = false; in slot_handle_level_range() local
5245 flush |= fn(kvm, iterator.rmap); in slot_handle_level_range()
5248 if (flush && lock_flush_tlb) { in slot_handle_level_range()
5252 flush = false; in slot_handle_level_range()
5258 if (flush && lock_flush_tlb) { in slot_handle_level_range()
5261 flush = false; in slot_handle_level_range()
5264 return flush; in slot_handle_level_range()
5508 bool flush; in kvm_zap_gfn_range() local
5529 flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end); in kvm_zap_gfn_range()
5530 if (flush) in kvm_zap_gfn_range()
5547 bool flush; in kvm_mmu_slot_remove_write_access() local
5550 flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect, in kvm_mmu_slot_remove_write_access()
5553 flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K); in kvm_mmu_slot_remove_write_access()
5567 if (flush) in kvm_mmu_slot_remove_write_access()
5641 bool flush; in kvm_mmu_slot_leaf_clear_dirty() local
5644 flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false); in kvm_mmu_slot_leaf_clear_dirty()
5646 flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot); in kvm_mmu_slot_leaf_clear_dirty()
5655 if (flush) in kvm_mmu_slot_leaf_clear_dirty()
5663 bool flush; in kvm_mmu_slot_largepage_remove_write_access() local
5666 flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect, in kvm_mmu_slot_largepage_remove_write_access()
5669 flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_2M); in kvm_mmu_slot_largepage_remove_write_access()
5672 if (flush) in kvm_mmu_slot_largepage_remove_write_access()
5680 bool flush; in kvm_mmu_slot_set_dirty() local
5683 flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false); in kvm_mmu_slot_set_dirty()
5685 flush |= kvm_tdp_mmu_slot_set_dirty(kvm, memslot); in kvm_mmu_slot_set_dirty()
5688 if (flush) in kvm_mmu_slot_set_dirty()
6015 bool flush = false; in kvm_recover_nx_lpages() local
6037 flush |= kvm_tdp_mmu_zap_sp(kvm, sp); in kvm_recover_nx_lpages()
6044 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); in kvm_recover_nx_lpages()
6046 flush = false; in kvm_recover_nx_lpages()
6049 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); in kvm_recover_nx_lpages()
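
Read together, these hits show one recurring idiom: each walker keeps a local bool flush, ORs in the result of per-SPTE work, and issues a remote TLB flush only if something actually changed; walkers that may drop mmu_lock (slot_handle_level_range(), kvm_recover_nx_lpages()) flush early and reset the flag before yielding, so no other vCPU keeps using a stale, more-permissive translation while the lock is released. The user-space sketch below only models that shape; the types and helpers (needs_flush(), clear_writable(), remote_tlb_flush(), need_resched_point(), the plain lock_flush_tlb parameter) are invented for illustration and are not the kernel's API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Illustrative stand-ins only: the real code walks KVM rmaps/SPTEs under
 * mmu_lock. Everything below is made up for the sketch.
 */
static bool needs_flush(unsigned long spte)
{
	return spte & 1;                   /* e.g. a writable bit being cleared */
}

static unsigned long clear_writable(unsigned long spte)
{
	return spte & ~1UL;
}

static void remote_tlb_flush(void)
{
	puts("flush remote TLBs");
}

/* Pretend we must periodically yield the lock (cond_resched_lock() in the kernel). */
static bool need_resched_point(size_t i)
{
	return (i % 4) == 3;
}

/*
 * Shape of slot_handle_level_range(): OR per-entry results into 'flush',
 * flush before yielding the lock, then do one final flush only if the tail
 * of the walk changed anything.
 */
static void walk_and_flush(unsigned long *spte, size_t n, bool lock_flush_tlb)
{
	bool flush = false;

	for (size_t i = 0; i < n; i++) {
		flush |= needs_flush(spte[i]);
		spte[i] = clear_writable(spte[i]);

		if (need_resched_point(i) && flush && lock_flush_tlb) {
			remote_tlb_flush();
			flush = false;     /* already flushed; start accumulating again */
		}
	}

	if (flush && lock_flush_tlb)
		remote_tlb_flush();
}

int main(void)
{
	unsigned long spte[] = { 1, 0, 0, 1, 0, 1 };

	walk_and_flush(spte, 6, true);
	return 0;
}

The same accumulate-then-flush shape explains the leaf helpers above (__rmap_write_protect(), __rmap_clear_dirty(), mmu_spte_update()): they only report whether a flush is needed via their bool return, and the decision to actually flush is left to the outermost caller so that one remote flush can cover an entire slot or range.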