Searched refs:pv_ops (Results 1 – 13 of 13) sorted by relevance
80   pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;  in hv_init_spinlocks()
81   pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);  in hv_init_spinlocks()
82   pv_ops.lock.wait = hv_qlock_wait;  in hv_init_spinlocks()
83   pv_ops.lock.kick = hv_qlock_kick;  in hv_init_spinlocks()
84   pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);  in hv_init_spinlocks()
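The Hyper-V hits above show the core paravirt pattern: a guest detects its hypervisor at boot and redirects pv_ops slots from the native defaults to hypercall-backed callbacks. Below is a minimal standalone C model of that pattern; all names in it are illustrative stand-ins, not the kernel's actual definitions.

#include <stdio.h>

/* Illustrative stand-in for the lock sub-struct of pv_ops. */
struct pv_lock_ops_model {
	void (*wait)(int *lock_byte, int val);
	void (*kick)(int cpu);
};

/* Bare-metal defaults: nothing to do, the CPU just spins. */
static void native_wait(int *lock_byte, int val) { (void)lock_byte; (void)val; }
static void native_kick(int cpu) { (void)cpu; }

/* Hypothetical hypervisor-assisted callbacks, standing in for
 * hv_qlock_wait()/hv_qlock_kick(): block and wake the vCPU instead. */
static void hv_wait_model(int *lock_byte, int val)
{
	printf("hypercall: halt vCPU until *%p != %d\n", (void *)lock_byte, val);
}
static void hv_kick_model(int cpu)
{
	printf("hypercall: wake vCPU %d\n", cpu);
}

static struct pv_lock_ops_model lock_ops = { native_wait, native_kick };

/* Mirrors the shape of hv_init_spinlocks(): run once at boot, only
 * when running as a guest, to retarget the slow-path slots. */
static void init_spinlocks_model(int running_as_guest)
{
	if (!running_as_guest)
		return;
	lock_ops.wait = hv_wait_model;
	lock_ops.kick = hv_kick_model;
}

int main(void)
{
	int lock = 0;
	init_spinlocks_model(1);
	lock_ops.wait(&lock, 0);  /* now dispatches to hv_wait_model() */
	lock_ops.kick(3);
	return 0;
}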
241  pv_ops.mmu.flush_tlb_multi = hyperv_flush_tlb_multi;  in hyperv_setup_mmu_ops()
242  pv_ops.mmu.tlb_remove_table = tlb_remove_table;  in hyperv_setup_mmu_ops()
139  pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;  in xen_init_spinlocks()
140  pv_ops.lock.queued_spin_unlock =  in xen_init_spinlocks()
142  pv_ops.lock.wait = xen_qlock_wait;  in xen_init_spinlocks()
143  pv_ops.lock.kick = xen_qlock_kick;  in xen_init_spinlocks()
144  pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);  in xen_init_spinlocks()
1014  pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);  in xen_setup_vcpu_info_placement()
1015  pv_ops.irq.irq_disable =  in xen_setup_vcpu_info_placement()
1017  pv_ops.irq.irq_enable =  in xen_setup_vcpu_info_placement()
1019  pv_ops.mmu.read_cr2 =  in xen_setup_vcpu_info_placement()
1179  pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry_boot;  in xen_setup_gdt()
1180  pv_ops.cpu.load_gdt = xen_load_gdt_boot;  in xen_setup_gdt()
1184  pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry;  in xen_setup_gdt()
1185  pv_ops.cpu.load_gdt = xen_load_gdt;  in xen_setup_gdt()
1215  pv_ops.cpu = xen_cpu_ops;  in xen_start_kernel()
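Two granularities appear in these Xen hits: single-slot stores (pv_ops.cpu.load_gdt = xen_load_gdt_boot, later swapped for xen_load_gdt once boot is far enough along) and wholesale replacement of a group (pv_ops.cpu = xen_cpu_ops at line 1215). The sketch below models both; struct and function names are invented for illustration.

#include <stdio.h>

struct cpu_ops_model {
	void (*load_gdt)(void);
	void (*write_gdt_entry)(int idx);
};

static void native_load_gdt(void)      { puts("native load_gdt"); }
static void native_write_gdt(int i)    { printf("native write %d\n", i); }
static void xen_load_gdt_model(void)   { puts("xen load_gdt"); }
static void xen_write_gdt_model(int i) { printf("xen write %d\n", i); }

static struct cpu_ops_model pv_cpu = { native_load_gdt, native_write_gdt };

/* Analogue of xen_cpu_ops: a fully populated replacement group. */
static const struct cpu_ops_model xen_cpu_ops_model = {
	xen_load_gdt_model, xen_write_gdt_model,
};

int main(void)
{
	pv_cpu.load_gdt();            /* native */
	pv_cpu = xen_cpu_ops_model;   /* group override, like line 1215 */
	pv_cpu.load_gdt();            /* xen */
	pv_cpu.write_gdt_entry(2);
	return 0;
}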
76 pv_ops.mmu.exit_mmap = xen_hvm_exit_mmap; in xen_hvm_init_mmu_ops()
108 pv_ops.irq = xen_irq_ops; in xen_init_irq_ops()
2055  pv_ops.mmu.set_pte = xen_set_pte;  in xen_post_allocator_init()
2056  pv_ops.mmu.set_pmd = xen_set_pmd;  in xen_post_allocator_init()
2057  pv_ops.mmu.set_pud = xen_set_pud;  in xen_post_allocator_init()
2058  pv_ops.mmu.set_p4d = xen_set_p4d;  in xen_post_allocator_init()
2062  pv_ops.mmu.alloc_pte = xen_alloc_pte;  in xen_post_allocator_init()
2063  pv_ops.mmu.alloc_pmd = xen_alloc_pmd;  in xen_post_allocator_init()
2064  pv_ops.mmu.release_pte = xen_release_pte;  in xen_post_allocator_init()
2065  pv_ops.mmu.release_pmd = xen_release_pmd;  in xen_post_allocator_init()
2066  pv_ops.mmu.alloc_pud = xen_alloc_pud;  in xen_post_allocator_init()
2067  pv_ops.mmu.release_pud = xen_release_pud;  in xen_post_allocator_init()
[all …]
20  return pv_ops.lock.queued_spin_unlock.func ==  in pv_is_native_spin_unlock()
32  return pv_ops.lock.vcpu_is_preempted.func ==  in pv_is_native_vcpu_is_preempted()
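These two hits show how the kernel later asks whether a slot was ever overridden: compare the stored pointer against the known native implementation. A self-contained model follows; names are illustrative, and the real code compares the .func member of a PV_CALLEE_SAVE wrapper rather than a bare pointer.

#include <stdbool.h>
#include <stdio.h>

typedef void (*unlock_fn)(int *lock);

static void native_unlock(int *lock) { *lock = 0; }
static void pv_unlock(int *lock)     { *lock = 0; /* + hypercall in a real guest */ }

static unlock_fn queued_spin_unlock = native_unlock;

/* Models pv_is_native_spin_unlock(): "native" means the slot still
 * points at the default implementation. */
static bool is_native_spin_unlock(void)
{
	return queued_spin_unlock == native_unlock;
}

int main(void)
{
	printf("native? %d\n", is_native_spin_unlock());  /* 1 */
	queued_spin_unlock = pv_unlock;                   /* a guest patched it */
	printf("native? %d\n", is_native_spin_unlock());  /* 0 */
	return 0;
}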
311  pv_ops.cpu.io_delay = kvm_io_delay;  in paravirt_ops_setup()
749  pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi;  in kvm_guest_init()
750  pv_ops.mmu.tlb_remove_table = tlb_remove_table;  in kvm_guest_init()
994  pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;  in kvm_spinlock_init()
995  pv_ops.lock.queued_spin_unlock =  in kvm_spinlock_init()
997  pv_ops.lock.wait = kvm_wait;  in kvm_spinlock_init()
998  pv_ops.lock.kick = kvm_kick_cpu;  in kvm_spinlock_init()
1001  pv_ops.lock.vcpu_is_preempted =  in kvm_spinlock_init()
109  void *opfunc = *((void **)&pv_ops + type);  in paravirt_patch()
241  struct paravirt_patch_template pv_ops = {  variable
382  EXPORT_SYMBOL(pv_ops);
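Line 109 relies on struct paravirt_patch_template being a flat bundle of function pointers, so slot number `type` can be fetched by treating &pv_ops as an array of void *. A small model of that lookup, with an invented struct and ops:

#include <stdio.h>

struct ops_model {
	void (*op_a)(void);
	void (*op_b)(void);
	void (*op_c)(void);
};

static void op_a(void) { puts("op_a"); }
static void op_b(void) { puts("op_b"); }
static void op_c(void) { puts("op_c"); }

static struct ops_model ops = { op_a, op_b, op_c };

int main(void)
{
	for (unsigned int type = 0; type < 3; type++) {
		/* Same trick as line 109: index the struct as void *[]. */
		void *opfunc = *((void **)&ops + type);
		((void (*)(void))opfunc)();
	}
	return 0;
}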
55   pv_ops.cpu.io_delay();  in slow_down_io()
57   pv_ops.cpu.io_delay();  in slow_down_io()
58   pv_ops.cpu.io_delay();  in slow_down_io()
59   pv_ops.cpu.io_delay();  in slow_down_io()
576  pv_ops.mmu.set_fixmap(idx, phys, flags);  in __set_fixmap()
755  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);)
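slow_down_io() simply invokes the io_delay slot four times, and the assembly hit at line 755 makes the same kind of call from asm: an indirect call through the pointer stored at a fixed byte offset into pv_ops. In C terms that offset is what offsetof() computes; a rough model under invented names:

#include <stddef.h>
#include <stdio.h>

struct irq_ops_model { unsigned long (*save_fl)(void); };
struct pv_ops_model  { struct irq_ops_model irq; };

static unsigned long native_save_fl(void) { return 0x202; }

static struct pv_ops_model pv = { .irq = { .save_fl = native_save_fl } };

/* Analogue of the PV_IRQ_save_fl offset used by the asm. */
#define PV_IRQ_SAVE_FL_OFF offsetof(struct pv_ops_model, irq.save_fl)

int main(void)
{
	/* What the asm-side indirect call amounts to: load the pointer
	 * stored at pv_ops + offset, then call through it. */
	unsigned long (**slot)(void) =
		(unsigned long (**)(void))((char *)&pv + PV_IRQ_SAVE_FL_OFF);
	printf("flags = %#lx\n", (*slot)());
	return 0;
}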
273  extern struct paravirt_patch_template pv_ops;
281  [paravirt_opptr] "i" (&(pv_ops.op))
426  #define PVOP_TEST_NULL(op) BUG_ON(pv_ops.op == NULL)
428  #define PVOP_TEST_NULL(op) ((void)pv_ops.op)
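Lines 426/428 are the debug and release variants of the null check wrapped around every pv_ops call: the debug build asserts the slot is non-NULL before calling through it, while the release variant merely evaluates the expression so both configurations compile the same call sites. A userspace model using assert() in place of BUG_ON():

#include <assert.h>
#include <stddef.h>

#define MODEL_DEBUG 1  /* stand-in for CONFIG_PARAVIRT_DEBUG */

#if MODEL_DEBUG
# define OP_TEST_NULL(op) assert((op) != NULL)  /* kernel: BUG_ON(... == NULL) */
#else
# define OP_TEST_NULL(op) ((void)(op))          /* evaluate only, no check */
#endif

static void nop(void) { }
static void (*io_delay_slot)(void) = nop;

int main(void)
{
	OP_TEST_NULL(io_delay_slot);  /* passes; would abort if the slot were NULL */
	io_delay_slot();
	return 0;
}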
332 pv_ops.cpu.io_delay = paravirt_nop; in vmware_paravirt_ops_setup()
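VMware's setup stubs io_delay with paravirt_nop, a single shared empty function that can fill any void slot whose operation should do nothing. Modeled minimally, with invented names:

static void model_nop(void) { }  /* stand-in for paravirt_nop */

static void (*io_delay_slot)(void) = model_nop;

int main(void)
{
	io_delay_slot();  /* dispatches to the shared no-op */
	return 0;
}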