#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/marker.h>
#include <linux/msi.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
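
/*
 * Illustrative sketch (not part of this header): a request bit is raised
 * with set_bit() on vcpu->requests and consumed in the vcpu run loop with
 * test_and_clear_bit(); the handler name below is hypothetical.
 *
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		flush_guest_tlb(vcpu);
 */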

#define KVM_USERSPACE_IRQ_SOURCE_ID	0

struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
	int                   dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					  gpa_t addr, int len, int is_write);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);
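
/*
 * Dispatch sketch (illustrative only, not part of this header): an MMIO or
 * PIO access is routed by asking the bus which registered device claims the
 * address and then calling that device's callbacks; the callback name below
 * is assumed from iodev.h.
 *
 *	struct kvm_io_device *dev;
 *
 *	dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, gpa, len, is_write);
 *	if (dev)
 *		kvm_iodevice_write(dev, gpa, len, val);
 */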

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int vcpu_id;
	struct mutex mutex;
	int   cpu;
	struct kvm_run *run;
	int guest_mode;
	unsigned long requests;
	struct kvm_guest_debug guest_debug;
	int fpu_active;
	int guest_fpu_loaded;
	wait_queue_head_t wq;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_size;
	unsigned char mmio_data[8];
	gpa_t mmio_phys_addr;
#endif

	struct kvm_vcpu_arch arch;
};

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	struct {
		unsigned long rmap_pde;
		int write_count;
	} *lpage_info;
	unsigned long userspace_addr;
	int user_alloc;
};
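
/*
 * A gfn belongs to a slot when base_gfn <= gfn < base_gfn + npages; a
 * minimal lookup sketch (gfn_to_memslot() below is the real interface):
 *
 *	for (i = 0; i < kvm->nmemslots; i++) {
 *		struct kvm_memory_slot *s = &kvm->memslots[i];
 *
 *		if (gfn >= s->base_gfn && gfn < s->base_gfn + s->npages)
 *			return s;
 *	}
 */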

struct kvm {
	struct mutex lock; /* protects the vcpus array and APIC accesses */
	spinlock_t mmu_lock;
	struct rw_semaphore slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	int nmemslots;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
					KVM_PRIVATE_MEM_SLOTS];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	struct list_head vm_list;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
 } while (0)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned int vcpu_size,
		  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

extern struct page *bad_page;
extern pfn_t bad_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
int kvm_arch_set_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc);
void kvm_arch_flush_shadow(struct kvm *kvm);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
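
/*
 * Usage sketch (illustrative; the destination structure is hypothetical):
 * the guest accessors return 0 on success and a negative error when part
 * of the requested range is not backed by a memslot.
 *
 *	struct foo val;
 *
 *	if (kvm_read_guest(kvm, gpa, &val, sizeof(val)))
 *		return -EFAULT;
 */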

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);
void kvm_arch_destroy_vm(struct kvm *kvm);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

struct kvm *kvm_arch_create_vm(void);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct work_struct interrupt_work;
	struct list_head list;
	int assigned_dev_id;
	int host_busnr;
	int host_devfn;
	int host_irq;
	bool host_irq_disabled;
	int guest_irq;
	struct msi_msg guest_msi;
#define KVM_ASSIGNED_DEV_GUEST_INTX	(1 << 0)
#define KVM_ASSIGNED_DEV_GUEST_MSI	(1 << 1)
#define KVM_ASSIGNED_DEV_HOST_INTX	(1 << 8)
#define KVM_ASSIGNED_DEV_HOST_MSI	(1 << 9)
	unsigned long irq_requested_type;
	int irq_source_id;
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
};
void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
			unsigned long npages);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      gfn_t base_gfn,
				      unsigned long npages)
{
	return 0;
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
	return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}
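
/*
 * Call-site sketch (illustrative; the real users are the arch vcpu_run
 * paths): these bracket the actual guest entry so that time spent in the
 * guest is accounted to the vcpu task via PF_VCPU, typically with
 * interrupts disabled around the entry itself.
 *
 *	kvm_guest_enter();
 *	... enter guest mode (arch-specific) ...
 *	kvm_guest_exit();
 */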

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
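
/*
 * Worked example: with 4K pages (PAGE_SHIFT == 12), gfn 0x1234 corresponds
 * to gpa 0x1234000; pfn_to_hpa() applies the same shift on the host side.
 */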

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 5, d1, d2, d3, d4, d5)
#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 4, d1, d2, d3, d4, 0)
#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 3, d1, d2, d3, 0, 0)
#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 2, d1, d2, 0, 0, 0)
#define KVMTRACE_1D(evt, vcpu, d1, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 1, d1, 0, 0, 0, 0)
#define KVMTRACE_0D(evt, vcpu, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 0, 0, 0, 0, 0, 0)
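
/*
 * Usage sketch (illustrative; the event name and data words are examples):
 * "evt" is pasted onto KVM_TRC_ and "name" onto kvm_trace_, so a call like
 *
 *	KVMTRACE_2D(PAGE_FAULT, vcpu, error_code, (u32)fault_addr, handler);
 *
 * emits a kvm_trace_handler marker carrying KVM_TRC_PAGE_FAULT and two
 * data words.
 */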

#ifdef CONFIG_KVM_TRACE
int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
void kvm_trace_cleanup(void);
#else
static inline
int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
#define kvm_trace_cleanup() ((void)0)
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return 1;
	/*
	 * Both reads happen under mmu_lock and both values are only
	 * modified under mmu_lock, so there's no need for an smp_rmb()
	 * in between; otherwise mmu_notifier_count would have to be
	 * read before mmu_notifier_seq (see the write side in
	 * mmu_notifier_invalidate_range_end).
	 */
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
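
/*
 * Caller pattern sketch (illustrative of how an arch page fault path is
 * expected to use this): sample mmu_notifier_seq before resolving the pfn,
 * then recheck under mmu_lock before installing the mapping.
 *
 *	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(vcpu->kvm, gfn);	(may sleep)
 *	spin_lock(&vcpu->kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;		(invalidation ran; retry the fault)
 *	... install the mapping ...
 */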
#endif

#endif