#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * Bits 16-31 of kvm_memory_region::flags are internally used by kvm;
 * the other bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)
#define KVM_MEMSLOT_INCOHERENT	(1UL << 17)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

/*
 * For a normal pfn, the highest 12 bits should be zero, so we can
 * mask bits 52-62 to indicate an error pfn and bit 63 to indicate a
 * noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but failed to be
 * translated to a pfn on the host.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * Error_noslot pfns indicate that the gfn can not be translated to a
 * pfn: either it is not in any slot, or the translation to a pfn
 * failed.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in any slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}

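/*
 * Illustrative caller pattern (an editor's sketch, not code from this
 * header): a fault handler typically rejects unusable pfns first, then
 * distinguishes "no slot" from in-slot translation failures.
 * handle_as_mmio() is a hypothetical helper:
 *
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	if (is_error_noslot_pfn(pfn)) {
 *		if (is_noslot_pfn(pfn))
 *			return handle_as_mmio(gfn);
 *		return -EFAULT;
 *	}
 */
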
/*
 * Architectures whose KVM_HVA_ERR_BAD is something other than
 * PAGE_OFFSET (e.g. s390) provide their own defines and
 * kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_PMU               15
#define KVM_REQ_PMI               16
#define KVM_REQ_WATCHDOG          17
#define KVM_REQ_MASTERCLOCK_UPDATE 18
#define KVM_REQ_MCLOCK_INPROGRESS 19
#define KVM_REQ_EPR_EXIT          20
#define KVM_REQ_SCAN_IOAPIC       21
#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22
#define KVM_REQ_ENABLE_IBS        23
#define KVM_REQ_DISABLE_IBS       24
#define KVM_REQ_APIC_PAGE_RELOAD  25

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct kmem_cache *kvm_vcpu_cache;

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
		    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			       struct kvm_io_device *dev);

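/*
 * Illustrative use (an editor's sketch; struct kvm_io_device and its
 * ops live in virt/kvm/iodev.h, not here): a device registers for a
 * gpa range on a bus and is then reached through the bus accessors:
 *
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len, dev);
 *	...
 *	ret = kvm_io_bus_write(kvm, KVM_MMIO_BUS, addr, len, val);
 */
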
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool   wakeup_all;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

/*
 * Carry out a gup that requires IO. Allow the mm to relinquish the mmap
 * semaphore if the filemap/swap has to wait on a page lock. pagep == NULL
 * controls whether we retry the gup one more time to completion in that case.
 * Typically this is called after a FAULT_FLAG_RETRY_NOWAIT in the main tdp
 * handler.
 */
int kvm_get_user_page_io(struct task_struct *tsk, struct mm_struct *mm,
			 unsigned long addr, bool write_fault,
			 struct page **pagep);

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	wait_queue_head_t wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Cpu relax intercept or pause loop exit optimization:
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or a cpu relax intercept.
	 * dy_eligible: indicates whether the vcpu is eligible for
	 *  directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be chosen so that those limits are not exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

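/*
 * Worked example (an editor's illustration, assuming
 * BITS_PER_LONG == 64): a slot of 262144 pages (1 GiB with 4 KiB
 * pages) needs ALIGN(262144, 64) / 8 = 32768 bytes of dirty bitmap;
 * a 100-page slot needs ALIGN(100, 64) / 8 = 16 bytes.
 */
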
struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
		struct kvm_s390_adapter_int adapter;
	};
	struct hlist_node link;
};

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * memslots are no longer sorted by id; use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots;
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	u32 bsp_vcpu_id;
#endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t        lock;
		struct list_head  items;
		struct list_head  resampler_list;
		struct mutex      resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
	struct hlist_head mask_notifier_list;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
	struct list_head devices;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)

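/*
 * Illustrative iteration (an editor's sketch), using only declarations
 * from this header:
 *
 *	struct kvm_vcpu *vcpu;
 *	int i;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */
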
#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
		memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

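/*
 * Illustrative lookup (an editor's sketch): resolve a slot id back to
 * its slot. Per the rcu_dereference_check() above, kvm_memslots() is
 * valid only inside an srcu read-side section on kvm->srcu or with
 * kvm->slots_lock held:
 *
 *	idx = srcu_read_lock(&kvm->srcu);
 *	slot = id_to_memslot(kvm_memslots(kvm), id);
 *	...
 *	srcu_read_unlock(&kvm->srcu, idx);
 */
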
/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

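/*
 * Illustrative mapping (an editor's sketch of how the ioctl arguments
 * select a change kind; see __kvm_set_memory_region() for the
 * authoritative logic): memory_size == 0 requests KVM_MR_DELETE; a
 * previously empty slot id with a nonzero size is KVM_MR_CREATE; a
 * changed guest_phys_addr on an existing slot is KVM_MR_MOVE;
 * otherwise only the flags differ and the change is KVM_MR_FLAGS_ONLY.
 */
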
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);

void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct list_head list;
	int assigned_dev_id;
	int host_segnr;
	int host_busnr;
	int host_devfn;
	unsigned int entries_nr;
	int host_irq;
	bool host_irq_disabled;
	bool pci_2_3;
	struct msix_entry *host_msix_entries;
	int guest_irq;
	struct msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	int irq_source_id;
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
	spinlock_t intx_lock;
	spinlock_t intx_mask_lock;
	char irq_name[32];
	struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}
#endif

static inline void kvm_guest_enter(void)
{
	unsigned long flags;

	BUG_ON(preemptible());

	local_irq_save(flags);
	guest_enter();
	local_irq_restore(flags);

	/* KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode. In fact switching to a guest mode
	 * is very similar to exiting to userspace from the rcu point of view.
	 * In addition a CPU may stay in a guest mode for quite a long time
	 * (up to one time slice). Let's treat guest mode as a quiescent
	 * state, just like we do with user-mode execution.
	 */
	rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit();
	local_irq_restore(flags);
}

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, slots)
		if (gfn >= memslot->base_gfn &&
		      gfn < memslot->base_gfn + memslot->npages)
			return memslot;

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

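/*
 * Worked example (an editor's illustration, assuming
 * PAGE_SIZE == 4096): for a slot with base_gfn = 0x100 and
 * userspace_addr = 0x7f0000000000, __gfn_to_hva_memslot(slot, 0x102)
 * returns 0x7f0000000000 + 2 * 4096 = 0x7f0000002000, and
 * hva_to_gfn_memslot(0x7f0000002000, slot) inverts it back to 0x102.
 */
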
static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
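
/*
 * Illustrative caller pattern (an editor's sketch of the usual
 * sequence): snapshot mmu_notifier_seq before translating the gfn,
 * then retry the fault if an invalidation raced with the lookup:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto out_unlock;	-- bail out and re-run the fault
 */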
#endif

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#ifdef CONFIG_S390
#define KVM_MAX_IRQ_ROUTES 4096 /* FIXME: we can have more than that... */
#else
#define KVM_MAX_IRQ_ROUTES 1024
#endif

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg);

void kvm_free_all_assigned_devices(struct kvm *kvm);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
						unsigned long arg)
{
	return -ENOTTY;
}

static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	} else {
		return false;
	}
}

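/*
 * Illustrative request flow (an editor's sketch): a producer raises a
 * request and kicks the vcpu out of guest mode; the vcpu loop later
 * consumes the request with kvm_check_request(), which also clears the
 * bit. flush_guest_tlb() is a hypothetical arch-specific handler:
 *
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *	...
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *		flush_guest_tlb(vcpu);
 */
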
extern bool kvm_rebooting;

struct kvm_device {
	struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

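/*
 * Illustrative device definition (an editor's sketch; the "foo" names
 * and KVM_DEV_TYPE_FOO are hypothetical):
 *
 *	static struct kvm_device_ops kvm_foo_ops = {
 *		.name = "kvm-foo",
 *		.create = kvm_foo_create,
 *		.destroy = kvm_foo_destroy,
 *	};
 *
 *	ret = kvm_register_device_ops(&kvm_foo_ops, KVM_DEV_TYPE_FOO);
 */
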
extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#endif /* __KVM_HOST_H */