#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/swait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MAX_VCPU_ID
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
#endif

/*
 * Bits 16-31 of kvm_memory_region::flags are used internally by kvm;
 * the other bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM	1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero, so we can mask
 * bits 62:52 to indicate an error pfn and bit 63 to indicate a noslot
 * pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)
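
/*
 * A worked example of the encoding above (illustrative only):
 * KVM_PFN_ERR_HWPOISON sets bits 62:52 (plus bit 0), so it is caught by
 * is_error_pfn() below, while KVM_PFN_NOSLOT sets only bit 63, so it is
 * caught by is_error_noslot_pfn() but not by is_error_pfn().
 */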

/*
 * error pfns indicate that the gfn is in a slot but could not be
 * translated to a pfn on the host.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn cannot be translated to a
 * pfn - either it is not in any slot or the translation failed.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* noslot pfn indicates that the gfn is not in any slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}

/*
 * Architectures whose KVM_HVA_ERR_BAD is other than PAGE_OFFSET (e.g. s390)
 * provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

#define KVM_REQUEST_MASK	GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP	BIT(8)
#define KVM_REQUEST_WAIT	BIT(9)
/*
 * Architecture-independent vcpu->requests bit members
 * Bits 4-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH	(0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_RELOAD	(1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_PENDING_TIMER	2
#define KVM_REQ_UNHALT		3
#define KVM_REQUEST_ARCH_BASE	8

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
	BUILD_BUG_ON((unsigned)(nr) >= (FIELD_SIZEOF(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
	(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr)	KVM_ARCH_REQ_FLAGS(nr, 0)
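
/*
 * Example (illustrative; the request names are hypothetical): an
 * architecture defines its own requests on top of the common ones as
 *
 *	#define KVM_REQ_FOO	KVM_ARCH_REQ(0)
 *	#define KVM_REQ_BAR	\
 *		KVM_ARCH_REQ_FLAGS(1, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 *
 * which yields request numbers 8 and 9 with the requested wakeup semantics.
 */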

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct kmem_cache *kvm_vcpu_cache;

extern struct mutex kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			       struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
					 gpa_t addr);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gpa_t cr2_or_gpa;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool wakeup_all;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
		       unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

#define KVM_UNMAPPED_PAGE	((void *) 0x500 + POISON_POINTER_DELTA)

struct kvm_host_map {
	/*
	 * Only valid if the 'pfn' is managed by the host kernel (i.e. there
	 * is a 'struct page' for it; when using the mem= kernel parameter,
	 * some memory can be used as guest memory but is not managed by the
	 * host kernel).
	 * If 'pfn' is not managed by the host kernel, this field is
	 * initialized to KVM_UNMAPPED_PAGE.
	 */
	struct page *page;
	void *hva;
	kvm_pfn_t pfn;
	kvm_pfn_t gfn;
};

/*
 * Used to check if the mapping is valid or not. Never use 'kvm_host_map'
 * directly to check for that.
 */
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
	return !!map->hva;
}

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	u64 requests;
	unsigned long guest_debug;

	int pre_pcpu;
	struct list_head blocked_vcpu_list;

	struct mutex mutex;
	struct kvm_run *run;

	int guest_xcr0_loaded;
	struct swait_queue_head wq;
	struct pid __rcu *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;
	unsigned int halt_poll_ns;
	bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * CPU relax intercept or pause loop exit optimization.
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or a cpu relax intercept.
	 * dy_eligible: indicates whether the vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	struct kvm_vcpu_arch arch;
	struct dentry *debugfs_dentry;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	/*
	 * The memory barrier ensures a previous write to vcpu->requests cannot
	 * be reordered with the read of vcpu->mode. It pairs with the general
	 * memory barrier following the write of vcpu->mode in VCPU RUN.
	 */
	smp_mb__before_atomic();
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}
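
/*
 * A sketch of the vcpu-run side this pairs with (illustrative; the exact
 * sequence lives in arch code):
 *
 *	vcpu->mode = IN_GUEST_MODE;
 *	smp_mb();			// pairs with the barrier above
 *	if (kvm_request_pending(vcpu)) {
 *		vcpu->mode = OUTSIDE_GUEST_MODE;
 *		// bail out and service the requests
 *	}
 */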

/*
 * Some of the bitops functions do not support overly long bitmaps.
 * This number must be chosen so that such limits are not exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long len = kvm_dirty_bitmap_bytes(memslot);

	return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}
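
/*
 * Note: the dirty_bitmap allocation is twice kvm_dirty_bitmap_bytes();
 * the second half returned above serves as a transfer buffer when the
 * dirty log is fetched and write-protected (see kvm_get_dirty_log_protect()).
 */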

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_hv_sint {
	u32 vcpu;
	u32 sint;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct {
			u32 address_lo;
			u32 address_hi;
			u32 data;
			u32 flags;
			u32 devid;
		} msi;
		struct kvm_s390_adapter_int adapter;
		struct kvm_hv_sint hv_sint;
	};
	struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[0];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

/*
 * Note:
 * memslots are no longer sorted by id; use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
	atomic_t lru_slot;
	int used_slots;
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

	/*
	 * created_vcpus is protected by kvm->lock, and is incremented
	 * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
	 * incremented after storing the kvm_vcpu pointer in vcpus,
	 * and is accessed atomically.
	 */
	atomic_t online_vcpus;
	int created_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
		struct list_head resampler_list;
		struct mutex resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
	struct list_head devices;
	struct dentry *debugfs_dentry;
	struct kvm_stat_data **debugfs_stat_data;
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	pid_t userspace_pid;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
	pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
			     ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
	kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \
			(vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...) \
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...) \
	kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
			      ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...) \
	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
	return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	int num_vcpus = atomic_read(&kvm->online_vcpus);
	i = array_index_nospec(i, num_vcpus);

	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)
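
/*
 * Example usage (illustrative):
 *
 *	struct kvm_vcpu *vcpu;
 *	int i;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */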

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	if (id < 0)
		return NULL;
	if (id < KVM_MAX_VCPUS)
		vcpu = kvm_get_vcpu(kvm, id);
	if (vcpu && vcpu->vcpu_id == id)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}

static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *tmp;
	int idx;

	kvm_for_each_vcpu(idx, tmp, vcpu->kvm)
		if (tmp == vcpu)
			return idx;
	BUG();
}

#define kvm_for_each_memslot(memslot, slots) \
	for (memslot = &slots->memslots[0]; \
	     memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
	     memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
	return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
			  bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
			       bool *writable);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);

void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, unsigned int offset,
				  unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
		struct gfn_to_pfn_cache *cache, bool atomic);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);

int kvm_get_dirty_log_protect(struct kvm *kvm,
			      struct kvm_dirty_log *log, bool *is_dirty);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask);

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

bool kvm_arch_has_vcpu_debugfs(void);
int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
 * All architectures that want to use vzalloc currently also
 * need their own kvm_arch_alloc_vm implementation.
 */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	return -ENOTSUPP;
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif

static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * Returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQs. On some architectures the virtual interrupt
 * controller is dynamically instantiated, so this is not always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return &memslots[slot];

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
	}

	return NULL;
}
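
/*
 * The binary search above relies on the first used_slots entries of
 * memslots[] being sorted in descending order of base_gfn: it converges
 * on the first slot whose base_gfn is <= gfn, then checks that gfn
 * actually falls inside that slot.
 */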

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
						gpa_t gpa)
{
	return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stat_data {
	int offset;
	int mode;
	struct kvm *kvm;
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	int mode;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
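
/*
 * Typical usage in an arch page fault handler (a sketch; the exact helper
 * calls vary per architecture):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);	// may sleep, mmu_lock not held
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto retry;		// pfn may be stale, restart the fault
 *	// ... install the mapping ...
 */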
#endif

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */

bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure the rest of the request is published to kvm_check_request's
	 * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
	 */
	smp_wmb();
	set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
	return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
	clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (kvm_test_request(req, vcpu)) {
		kvm_clear_request(req, vcpu);

		/*
		 * Ensure the rest of the request is visible to kvm_check_request's
		 * caller.  Paired with the smp_wmb in kvm_make_request.
		 */
		smp_mb__after_atomic();
		return true;
	} else {
		return false;
	}
}
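
/*
 * Example pairing (illustrative):
 *
 *	// requester, any context:
 *	kvm_make_request(KVM_REQ_UNHALT, vcpu);
 *
 *	// vcpu thread, before reentering the guest:
 *	if (kvm_check_request(KVM_REQ_UNHALT, vcpu))
 *		// ... handle the unhalt ...
 *
 * The smp_wmb()/smp_mb__after_atomic() pair above guarantees that data
 * written before kvm_make_request() is visible once the request is seen.
 */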

extern bool kvm_rebooting;

extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
extern unsigned int halt_poll_ns_shrink;

struct kvm_device {
	struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;

	/*
	 * create is called holding kvm->lock and any operations not suitable
	 * to do while holding the lock should be deferred to init (see
	 * below).
	 */
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * init is called after create if create is successful and is called
	 * outside of holding kvm->lock.
	 */
	void (*init)(struct kvm_device *dev);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
};
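
/*
 * Minimal backend sketch (illustrative; the "foo" names are hypothetical).
 * Only create, destroy and name are mandatory:
 *
 *	static int foo_create(struct kvm_device *dev, u32 type)
 *	{
 *		return 0;	// kvm->lock is held here
 *	}
 *
 *	static void foo_destroy(struct kvm_device *dev)
 *	{
 *		kfree(dev);	// destroy must free dev itself
 *	}
 *
 *	static struct kvm_device_ops foo_ops = {
 *		.name = "foo",
 *		.create = foo_create,
 *		.destroy = foo_destroy,
 *	};
 *
 * registered once at init time with kvm_register_device_ops(&foo_ops, type).
 */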

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
				     struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
				      struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wake up during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg);
#else
static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
					     unsigned int ioctl,
					     unsigned long arg)
{
	return -ENOIOCTLCMD;
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */

void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
					    unsigned long start, unsigned long end);

#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */

typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
				uintptr_t data, const char *name,
				struct task_struct **thread_ptr);

#endif