
Lines Matching refs:vcpu

188 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
190 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
192 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
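The three kvm_io_bus_*() declarations above are the vCPU-aware entry points for the in-kernel I/O bus. A minimal sketch, assuming this listing comes from the KVM core header (include/linux/kvm_host.h): forward a 32-bit MMIO store onto the MMIO bus. kvm_io_bus_write() returns 0 when a registered device claims the range and a negative error (typically -EOPNOTSUPP) otherwise, so a caller can fall back to userspace emulation. demo_emulate_mmio_store() is a hypothetical helper, not a KVM API.

#include <linux/kvm_host.h>

static int demo_emulate_mmio_store(struct kvm_vcpu *vcpu, gpa_t addr, u32 val)
{
	/* KVM_MMIO_BUS is one of the enum kvm_bus indices */
	return kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, sizeof(val), &val);
}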
206 struct kvm_vcpu *vcpu; member
214 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
215 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
216 int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
218 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
328 static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) in kvm_vcpu_exiting_guest_mode() argument
336 return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE); in kvm_vcpu_exiting_guest_mode()
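The cmpxchg above atomically moves a vCPU from IN_GUEST_MODE to EXITING_GUEST_MODE and returns the previous mode. A sketch of how that is typically consumed, loosely modelled on the x86 kvm_arch_vcpu_should_kick() logic: only the caller that actually performed the transition sends the wakeup IPI, so concurrent kickers do not flood the target CPU.

static bool demo_should_send_ipi(struct kvm_vcpu *vcpu)
{
	/* true only for the one caller that won the mode transition */
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}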
376 u32 vcpu; member
425 static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_memslots_id() argument
523 #define vcpu_unimpl(vcpu, fmt, ...) \ argument
525 (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)
527 #define vcpu_debug(vcpu, fmt, ...) \ argument
528 kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
529 #define vcpu_debug_ratelimited(vcpu, fmt, ...) \ argument
530 kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
532 #define vcpu_err(vcpu, fmt, ...) \ argument
533 kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
585 struct kvm_vcpu *vcpu = NULL; in kvm_get_vcpu_by_id() local
591 vcpu = kvm_get_vcpu(kvm, id); in kvm_get_vcpu_by_id()
592 if (vcpu && vcpu->vcpu_id == id) in kvm_get_vcpu_by_id()
593 return vcpu; in kvm_get_vcpu_by_id()
594 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_get_vcpu_by_id()
595 if (vcpu->vcpu_id == id) in kvm_get_vcpu_by_id()
596 return vcpu; in kvm_get_vcpu_by_id()
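kvm_get_vcpu_by_id() first tries the dense fast path above (using the id as an index into the vCPU array) and falls back to a full kvm_for_each_vcpu() walk for sparse id layouts. A hypothetical caller:

static void demo_kick_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu = kvm_get_vcpu_by_id(kvm, id);

	if (vcpu)
		kvm_vcpu_kick(vcpu);
}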
600 static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu) in kvm_vcpu_get_idx() argument
602 return vcpu->vcpu_idx; in kvm_vcpu_get_idx()
610 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
611 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
613 void vcpu_load(struct kvm_vcpu *vcpu);
614 void vcpu_put(struct kvm_vcpu *vcpu);
661 static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu) in kvm_vcpu_memslots() argument
663 int as_id = kvm_arch_vcpu_memslots_id(vcpu); in kvm_vcpu_memslots()
665 return __kvm_memslots(vcpu->kvm, as_id); in kvm_vcpu_memslots()
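kvm_vcpu_memslots() matters on architectures with more than one memslot address space (on x86, SMM has its own set); elsewhere kvm_arch_vcpu_memslots_id() returns 0 and it is equivalent to kvm_memslots(vcpu->kvm). A sketch of a vCPU-aware slot lookup, under the usual assumption that the caller holds kvm->srcu:

static struct kvm_memory_slot *demo_find_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	/* resolves through the vCPU's current address space, unlike
	 * gfn_to_memslot(vcpu->kvm, gfn), which always uses slot set 0 */
	return kvm_vcpu_gfn_to_memslot(vcpu, gfn);
}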
776 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
779 struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
780 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
781 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
782 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
783 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
784 int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
786 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
787 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
788 int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
790 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
791 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
792 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
794 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
796 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
798 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
800 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
802 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
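The kvm_vcpu_read_guest*() / kvm_vcpu_write_guest*() family copies data between host and guest memory through the vCPU's memslots and returns 0 on success or -EFAULT on a bad mapping. A sketch with a hypothetical guest layout: bump a u64 counter stored at a guest physical address. The write helpers already mark the touched page dirty; kvm_vcpu_mark_page_dirty() is for paths that write through a raw mapping instead.

static int demo_bump_guest_counter(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	u64 counter;
	int ret;

	ret = kvm_vcpu_read_guest(vcpu, gpa, &counter, sizeof(counter));
	if (ret)
		return ret;

	counter++;
	return kvm_vcpu_write_guest(vcpu, gpa, &counter, sizeof(counter));
}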
804 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
805 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
807 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
808 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
809 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
810 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
811 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
813 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
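kvm_vcpu_on_spin() implements directed yield: the time slice of a vCPU caught spinning (pause-loop or WFE exit) is donated to another runnable vCPU of the same VM. A sketch loosely modelled on how arch exit handlers call it, restricting yield candidates to kernel-mode vCPUs when the spinner itself was in kernel mode:

static void demo_handle_pause_exit(struct kvm_vcpu *vcpu)
{
	/* second argument: user-mode vCPUs are not eligible yield targets
	 * when the spinning vCPU was executing guest kernel code */
	kvm_vcpu_on_spin(vcpu, kvm_arch_vcpu_in_kernel(vcpu));
}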
825 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
854 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
855 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
857 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
860 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
861 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
862 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
864 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
866 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
868 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
870 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
872 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
877 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
878 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
880 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
882 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
883 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
884 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
886 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
887 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
888 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
891 void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
899 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
900 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
901 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
902 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
964 static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_wq() argument
967 return vcpu->arch.wqp; in kvm_arch_vcpu_wq()
969 return &vcpu->wq; in kvm_arch_vcpu_wq()
991 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
992 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
1108 static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu, in kvm_vcpu_gpa_to_page() argument
1111 return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa)); in kvm_vcpu_gpa_to_page()
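For short-lived host access to a single guest page, kvm_vcpu_map()/kvm_vcpu_unmap() (declared above) are often more convenient than handling struct page directly. A sketch with a hypothetical helper; note that existing callers pass gpa_to_gfn() for the second parameter despite its gpa_t name:

static int demo_poke_guest_byte(struct kvm_vcpu *vcpu, gpa_t gpa, u8 val)
{
	struct kvm_host_map map;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
		return -EFAULT;

	*((u8 *)map.hva + offset_in_page(gpa)) = val;
	kvm_vcpu_unmap(vcpu, &map, true);	/* true: the page was written, mark it dirty */
	return 0;
}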
1229 static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) in kvm_make_request() argument
1236 set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); in kvm_make_request()
1239 static inline bool kvm_request_pending(struct kvm_vcpu *vcpu) in kvm_request_pending() argument
1241 return READ_ONCE(vcpu->requests); in kvm_request_pending()
1244 static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu) in kvm_test_request() argument
1246 return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); in kvm_test_request()
1249 static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu) in kvm_clear_request() argument
1251 clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); in kvm_clear_request()
1254 static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) in kvm_check_request() argument
1256 if (kvm_test_request(req, vcpu)) { in kvm_check_request()
1257 kvm_clear_request(req, vcpu); in kvm_check_request()
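These helpers implement the per-vCPU request bitmap. The usual producer/consumer pattern: another thread sets a request bit and kicks the vCPU, and the vCPU's run loop drains pending requests before re-entering the guest. A sketch using KVM_REQ_TLB_FLUSH, one of the generic requests defined in this header:

static void demo_post_tlb_flush(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_vcpu_kick(vcpu);	/* wake a blocked vCPU or IPI a running one */
}

static void demo_service_requests(struct kvm_vcpu *vcpu)
{
	if (!kvm_request_pending(vcpu))
		return;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* arch code would flush the guest TLB here */
	}
}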
1340 static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) in kvm_vcpu_set_in_spin_loop() argument
1342 vcpu->spin_loop.in_spin_loop = val; in kvm_vcpu_set_in_spin_loop()
1344 static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) in kvm_vcpu_set_dy_eligible() argument
1346 vcpu->spin_loop.dy_eligible = val; in kvm_vcpu_set_dy_eligible()
1351 static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) in kvm_vcpu_set_in_spin_loop() argument
1355 static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) in kvm_vcpu_set_dy_eligible() argument
1374 static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) in vcpu_valid_wakeup() argument
1376 return vcpu->valid_wakeup; in vcpu_valid_wakeup()
1380 static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu) in vcpu_valid_wakeup() argument
1388 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
1390 static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) in kvm_arch_no_poll() argument
1412 int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
1414 static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_run_pid_change() argument