1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * This module enables machines with Intel VT-x extensions to run virtual
6 * machines without emulation or binary translation.
7 *
8 * Copyright (C) 2006 Qumranet, Inc.
9 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10 *
11 * Authors:
12 * Avi Kivity <avi@qumranet.com>
13 * Yaniv Kamay <yaniv@qumranet.com>
14 */
15
16 #include <kvm/iodev.h>
17
18 #include <linux/kvm_host.h>
19 #include <linux/kvm.h>
20 #include <linux/module.h>
21 #include <linux/errno.h>
22 #include <linux/percpu.h>
23 #include <linux/mm.h>
24 #include <linux/miscdevice.h>
25 #include <linux/vmalloc.h>
26 #include <linux/reboot.h>
27 #include <linux/debugfs.h>
28 #include <linux/highmem.h>
29 #include <linux/file.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/cpu.h>
32 #include <linux/sched/signal.h>
33 #include <linux/sched/mm.h>
34 #include <linux/sched/stat.h>
35 #include <linux/cpumask.h>
36 #include <linux/smp.h>
37 #include <linux/anon_inodes.h>
38 #include <linux/profile.h>
39 #include <linux/kvm_para.h>
40 #include <linux/pagemap.h>
41 #include <linux/mman.h>
42 #include <linux/swap.h>
43 #include <linux/bitops.h>
44 #include <linux/spinlock.h>
45 #include <linux/compat.h>
46 #include <linux/srcu.h>
47 #include <linux/hugetlb.h>
48 #include <linux/slab.h>
49 #include <linux/sort.h>
50 #include <linux/bsearch.h>
51 #include <linux/io.h>
52 #include <linux/lockdep.h>
53 #include <linux/kthread.h>
54
55 #include <asm/processor.h>
56 #include <asm/ioctl.h>
57 #include <linux/uaccess.h>
58
59 #include "coalesced_mmio.h"
60 #include "async_pf.h"
61 #include "vfio.h"
62
63 #define CREATE_TRACE_POINTS
64 #include <trace/events/kvm.h>
65
66 /* Worst case buffer size needed for holding an integer. */
67 #define ITOA_MAX_LEN 12
68
69 MODULE_AUTHOR("Qumranet");
70 MODULE_LICENSE("GPL");
71
72 /* Architectures should define their poll value according to the halt latency */
73 unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
74 module_param(halt_poll_ns, uint, 0644);
75 EXPORT_SYMBOL_GPL(halt_poll_ns);
76
77 /* Default doubles per-vcpu halt_poll_ns. */
78 unsigned int halt_poll_ns_grow = 2;
79 module_param(halt_poll_ns_grow, uint, 0644);
80 EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
81
82 /* The start value to grow halt_poll_ns from */
83 unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
84 module_param(halt_poll_ns_grow_start, uint, 0644);
85 EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
86
87 /* Default resets per-vcpu halt_poll_ns . */
88 unsigned int halt_poll_ns_shrink;
89 module_param(halt_poll_ns_shrink, uint, 0644);
90 EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
91
92 /*
93 * Ordering of locks:
94 *
95 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
96 */
97
98 DEFINE_MUTEX(kvm_lock);
99 static DEFINE_RAW_SPINLOCK(kvm_count_lock);
100 LIST_HEAD(vm_list);
101
102 static cpumask_var_t cpus_hardware_enabled;
103 static int kvm_usage_count;
104 static atomic_t hardware_enable_failed;
105
106 static struct kmem_cache *kvm_vcpu_cache;
107
108 static __read_mostly struct preempt_ops kvm_preempt_ops;
109 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
110
111 struct dentry *kvm_debugfs_dir;
112 EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
113
114 static int kvm_debugfs_num_entries;
115 static const struct file_operations stat_fops_per_vm;
116
117 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
118 unsigned long arg);
119 #ifdef CONFIG_KVM_COMPAT
120 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
121 unsigned long arg);
122 #define KVM_COMPAT(c) .compat_ioctl = (c)
123 #else
124 /*
125 * For architectures that don't implement a compat infrastructure,
126 * adopt a double line of defense:
127 * - Prevent a compat task from opening /dev/kvm
128 * - If the open has been done by a 64bit task, and the KVM fd
129 * passed to a compat task, let the ioctls fail.
130 */
kvm_no_compat_ioctl(struct file * file,unsigned int ioctl,unsigned long arg)131 static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
132 unsigned long arg) { return -EINVAL; }
133
kvm_no_compat_open(struct inode * inode,struct file * file)134 static int kvm_no_compat_open(struct inode *inode, struct file *file)
135 {
136 return is_compat_task() ? -ENODEV : 0;
137 }
138 #define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \
139 .open = kvm_no_compat_open
140 #endif
141 static int hardware_enable_all(void);
142 static void hardware_disable_all(void);
143
144 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
145
146 __visible bool kvm_rebooting;
147 EXPORT_SYMBOL_GPL(kvm_rebooting);
148
149 #define KVM_EVENT_CREATE_VM 0
150 #define KVM_EVENT_DESTROY_VM 1
151 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
152 static unsigned long long kvm_createvm_count;
153 static unsigned long long kvm_active_vms;
154
kvm_arch_mmu_notifier_invalidate_range(struct kvm * kvm,unsigned long start,unsigned long end)155 __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
156 unsigned long start, unsigned long end)
157 {
158 }
159
kvm_is_zone_device_pfn(kvm_pfn_t pfn)160 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
161 {
162 /*
163 * The metadata used by is_zone_device_page() to determine whether or
164 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
165 * the device has been pinned, e.g. by get_user_pages(). WARN if the
166 * page_count() is zero to help detect bad usage of this helper.
167 */
168 if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
169 return false;
170
171 return is_zone_device_page(pfn_to_page(pfn));
172 }
173
kvm_is_reserved_pfn(kvm_pfn_t pfn)174 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
175 {
176 /*
177 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
178 * perspective they are "normal" pages, albeit with slightly different
179 * usage rules.
180 */
181 if (pfn_valid(pfn))
182 return PageReserved(pfn_to_page(pfn)) &&
183 !is_zero_pfn(pfn) &&
184 !kvm_is_zone_device_pfn(pfn);
185
186 return true;
187 }
188
kvm_is_transparent_hugepage(kvm_pfn_t pfn)189 bool kvm_is_transparent_hugepage(kvm_pfn_t pfn)
190 {
191 struct page *page = pfn_to_page(pfn);
192
193 if (!PageTransCompoundMap(page))
194 return false;
195
196 return is_transparent_hugepage(compound_head(page));
197 }
198
199 /*
200 * Switches to specified vcpu, until a matching vcpu_put()
201 */
vcpu_load(struct kvm_vcpu * vcpu)202 void vcpu_load(struct kvm_vcpu *vcpu)
203 {
204 int cpu = get_cpu();
205
206 __this_cpu_write(kvm_running_vcpu, vcpu);
207 preempt_notifier_register(&vcpu->preempt_notifier);
208 kvm_arch_vcpu_load(vcpu, cpu);
209 put_cpu();
210 }
211 EXPORT_SYMBOL_GPL(vcpu_load);
212
vcpu_put(struct kvm_vcpu * vcpu)213 void vcpu_put(struct kvm_vcpu *vcpu)
214 {
215 preempt_disable();
216 kvm_arch_vcpu_put(vcpu);
217 preempt_notifier_unregister(&vcpu->preempt_notifier);
218 __this_cpu_write(kvm_running_vcpu, NULL);
219 preempt_enable();
220 }
221 EXPORT_SYMBOL_GPL(vcpu_put);
222
223 /* TODO: merge with kvm_arch_vcpu_should_kick */
kvm_request_needs_ipi(struct kvm_vcpu * vcpu,unsigned req)224 static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
225 {
226 int mode = kvm_vcpu_exiting_guest_mode(vcpu);
227
228 /*
229 * We need to wait for the VCPU to reenable interrupts and get out of
230 * READING_SHADOW_PAGE_TABLES mode.
231 */
232 if (req & KVM_REQUEST_WAIT)
233 return mode != OUTSIDE_GUEST_MODE;
234
235 /*
236 * Need to kick a running VCPU, but otherwise there is nothing to do.
237 */
238 return mode == IN_GUEST_MODE;
239 }
240
ack_flush(void * _completed)241 static void ack_flush(void *_completed)
242 {
243 }
244
kvm_kick_many_cpus(const struct cpumask * cpus,bool wait)245 static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
246 {
247 if (unlikely(!cpus))
248 cpus = cpu_online_mask;
249
250 if (cpumask_empty(cpus))
251 return false;
252
253 smp_call_function_many(cpus, ack_flush, NULL, wait);
254 return true;
255 }
256
kvm_make_vcpus_request_mask(struct kvm * kvm,unsigned int req,struct kvm_vcpu * except,unsigned long * vcpu_bitmap,cpumask_var_t tmp)257 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
258 struct kvm_vcpu *except,
259 unsigned long *vcpu_bitmap, cpumask_var_t tmp)
260 {
261 int i, cpu, me;
262 struct kvm_vcpu *vcpu;
263 bool called;
264
265 me = get_cpu();
266
267 kvm_for_each_vcpu(i, vcpu, kvm) {
268 if ((vcpu_bitmap && !test_bit(i, vcpu_bitmap)) ||
269 vcpu == except)
270 continue;
271
272 kvm_make_request(req, vcpu);
273 cpu = vcpu->cpu;
274
275 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
276 continue;
277
278 if (tmp != NULL && cpu != -1 && cpu != me &&
279 kvm_request_needs_ipi(vcpu, req))
280 __cpumask_set_cpu(cpu, tmp);
281 }
282
283 called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
284 put_cpu();
285
286 return called;
287 }
288
kvm_make_all_cpus_request_except(struct kvm * kvm,unsigned int req,struct kvm_vcpu * except)289 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
290 struct kvm_vcpu *except)
291 {
292 cpumask_var_t cpus;
293 bool called;
294
295 zalloc_cpumask_var(&cpus, GFP_ATOMIC);
296
297 called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus);
298
299 free_cpumask_var(cpus);
300 return called;
301 }
302
kvm_make_all_cpus_request(struct kvm * kvm,unsigned int req)303 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
304 {
305 return kvm_make_all_cpus_request_except(kvm, req, NULL);
306 }
307
308 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
kvm_flush_remote_tlbs(struct kvm * kvm)309 void kvm_flush_remote_tlbs(struct kvm *kvm)
310 {
311 /*
312 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
313 * kvm_make_all_cpus_request.
314 */
315 long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
316
317 /*
318 * We want to publish modifications to the page tables before reading
319 * mode. Pairs with a memory barrier in arch-specific code.
320 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
321 * and smp_mb in walk_shadow_page_lockless_begin/end.
322 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
323 *
324 * There is already an smp_mb__after_atomic() before
325 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
326 * barrier here.
327 */
328 if (!kvm_arch_flush_remote_tlb(kvm)
329 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
330 ++kvm->stat.remote_tlb_flush;
331 cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
332 }
333 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
334 #endif
335
kvm_reload_remote_mmus(struct kvm * kvm)336 void kvm_reload_remote_mmus(struct kvm *kvm)
337 {
338 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
339 }
340
341 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache * mc,gfp_t gfp_flags)342 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
343 gfp_t gfp_flags)
344 {
345 gfp_flags |= mc->gfp_zero;
346
347 if (mc->kmem_cache)
348 return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
349 else
350 return (void *)__get_free_page(gfp_flags);
351 }
352
kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache * mc,int min)353 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
354 {
355 void *obj;
356
357 if (mc->nobjs >= min)
358 return 0;
359 while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
360 obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
361 if (!obj)
362 return mc->nobjs >= min ? 0 : -ENOMEM;
363 mc->objects[mc->nobjs++] = obj;
364 }
365 return 0;
366 }
367
kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache * mc)368 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
369 {
370 return mc->nobjs;
371 }
372
kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache * mc)373 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
374 {
375 while (mc->nobjs) {
376 if (mc->kmem_cache)
377 kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
378 else
379 free_page((unsigned long)mc->objects[--mc->nobjs]);
380 }
381 }
382
kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache * mc)383 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
384 {
385 void *p;
386
387 if (WARN_ON(!mc->nobjs))
388 p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
389 else
390 p = mc->objects[--mc->nobjs];
391 BUG_ON(!p);
392 return p;
393 }
394 #endif
395
kvm_vcpu_init(struct kvm_vcpu * vcpu,struct kvm * kvm,unsigned id)396 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
397 {
398 mutex_init(&vcpu->mutex);
399 vcpu->cpu = -1;
400 vcpu->kvm = kvm;
401 vcpu->vcpu_id = id;
402 vcpu->pid = NULL;
403 rcuwait_init(&vcpu->wait);
404 kvm_async_pf_vcpu_init(vcpu);
405
406 vcpu->pre_pcpu = -1;
407 INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);
408
409 kvm_vcpu_set_in_spin_loop(vcpu, false);
410 kvm_vcpu_set_dy_eligible(vcpu, false);
411 vcpu->preempted = false;
412 vcpu->ready = false;
413 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
414 }
415
kvm_vcpu_destroy(struct kvm_vcpu * vcpu)416 void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
417 {
418 kvm_arch_vcpu_destroy(vcpu);
419
420 /*
421 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
422 * the vcpu->pid pointer, and at destruction time all file descriptors
423 * are already gone.
424 */
425 put_pid(rcu_dereference_protected(vcpu->pid, 1));
426
427 free_page((unsigned long)vcpu->run);
428 kmem_cache_free(kvm_vcpu_cache, vcpu);
429 }
430 EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);
431
432 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
mmu_notifier_to_kvm(struct mmu_notifier * mn)433 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
434 {
435 return container_of(mn, struct kvm, mmu_notifier);
436 }
437
kvm_mmu_notifier_invalidate_range(struct mmu_notifier * mn,struct mm_struct * mm,unsigned long start,unsigned long end)438 static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
439 struct mm_struct *mm,
440 unsigned long start, unsigned long end)
441 {
442 struct kvm *kvm = mmu_notifier_to_kvm(mn);
443 int idx;
444
445 idx = srcu_read_lock(&kvm->srcu);
446 kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
447 srcu_read_unlock(&kvm->srcu, idx);
448 }
449
kvm_mmu_notifier_change_pte(struct mmu_notifier * mn,struct mm_struct * mm,unsigned long address,pte_t pte)450 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
451 struct mm_struct *mm,
452 unsigned long address,
453 pte_t pte)
454 {
455 struct kvm *kvm = mmu_notifier_to_kvm(mn);
456 int idx;
457
458 idx = srcu_read_lock(&kvm->srcu);
459 spin_lock(&kvm->mmu_lock);
460 kvm->mmu_notifier_seq++;
461
462 if (kvm_set_spte_hva(kvm, address, pte))
463 kvm_flush_remote_tlbs(kvm);
464
465 spin_unlock(&kvm->mmu_lock);
466 srcu_read_unlock(&kvm->srcu, idx);
467 }
468
kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier * mn,const struct mmu_notifier_range * range)469 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
470 const struct mmu_notifier_range *range)
471 {
472 struct kvm *kvm = mmu_notifier_to_kvm(mn);
473 int need_tlb_flush = 0, idx;
474
475 idx = srcu_read_lock(&kvm->srcu);
476 spin_lock(&kvm->mmu_lock);
477 /*
478 * The count increase must become visible at unlock time as no
479 * spte can be established without taking the mmu_lock and
480 * count is also read inside the mmu_lock critical section.
481 */
482 kvm->mmu_notifier_count++;
483 need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
484 range->flags);
485 /* we've to flush the tlb before the pages can be freed */
486 if (need_tlb_flush || kvm->tlbs_dirty)
487 kvm_flush_remote_tlbs(kvm);
488
489 spin_unlock(&kvm->mmu_lock);
490 srcu_read_unlock(&kvm->srcu, idx);
491
492 return 0;
493 }
494
kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier * mn,const struct mmu_notifier_range * range)495 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
496 const struct mmu_notifier_range *range)
497 {
498 struct kvm *kvm = mmu_notifier_to_kvm(mn);
499
500 spin_lock(&kvm->mmu_lock);
501 /*
502 * This sequence increase will notify the kvm page fault that
503 * the page that is going to be mapped in the spte could have
504 * been freed.
505 */
506 kvm->mmu_notifier_seq++;
507 smp_wmb();
508 /*
509 * The above sequence increase must be visible before the
510 * below count decrease, which is ensured by the smp_wmb above
511 * in conjunction with the smp_rmb in mmu_notifier_retry().
512 */
513 kvm->mmu_notifier_count--;
514 spin_unlock(&kvm->mmu_lock);
515
516 BUG_ON(kvm->mmu_notifier_count < 0);
517 }
518
kvm_mmu_notifier_clear_flush_young(struct mmu_notifier * mn,struct mm_struct * mm,unsigned long start,unsigned long end)519 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
520 struct mm_struct *mm,
521 unsigned long start,
522 unsigned long end)
523 {
524 struct kvm *kvm = mmu_notifier_to_kvm(mn);
525 int young, idx;
526
527 idx = srcu_read_lock(&kvm->srcu);
528 spin_lock(&kvm->mmu_lock);
529
530 young = kvm_age_hva(kvm, start, end);
531 if (young)
532 kvm_flush_remote_tlbs(kvm);
533
534 spin_unlock(&kvm->mmu_lock);
535 srcu_read_unlock(&kvm->srcu, idx);
536
537 return young;
538 }
539
kvm_mmu_notifier_clear_young(struct mmu_notifier * mn,struct mm_struct * mm,unsigned long start,unsigned long end)540 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
541 struct mm_struct *mm,
542 unsigned long start,
543 unsigned long end)
544 {
545 struct kvm *kvm = mmu_notifier_to_kvm(mn);
546 int young, idx;
547
548 idx = srcu_read_lock(&kvm->srcu);
549 spin_lock(&kvm->mmu_lock);
550 /*
551 * Even though we do not flush TLB, this will still adversely
552 * affect performance on pre-Haswell Intel EPT, where there is
553 * no EPT Access Bit to clear so that we have to tear down EPT
554 * tables instead. If we find this unacceptable, we can always
555 * add a parameter to kvm_age_hva so that it effectively doesn't
556 * do anything on clear_young.
557 *
558 * Also note that currently we never issue secondary TLB flushes
559 * from clear_young, leaving this job up to the regular system
560 * cadence. If we find this inaccurate, we might come up with a
561 * more sophisticated heuristic later.
562 */
563 young = kvm_age_hva(kvm, start, end);
564 spin_unlock(&kvm->mmu_lock);
565 srcu_read_unlock(&kvm->srcu, idx);
566
567 return young;
568 }
569
kvm_mmu_notifier_test_young(struct mmu_notifier * mn,struct mm_struct * mm,unsigned long address)570 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
571 struct mm_struct *mm,
572 unsigned long address)
573 {
574 struct kvm *kvm = mmu_notifier_to_kvm(mn);
575 int young, idx;
576
577 idx = srcu_read_lock(&kvm->srcu);
578 spin_lock(&kvm->mmu_lock);
579 young = kvm_test_age_hva(kvm, address);
580 spin_unlock(&kvm->mmu_lock);
581 srcu_read_unlock(&kvm->srcu, idx);
582
583 return young;
584 }
585
kvm_mmu_notifier_release(struct mmu_notifier * mn,struct mm_struct * mm)586 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
587 struct mm_struct *mm)
588 {
589 struct kvm *kvm = mmu_notifier_to_kvm(mn);
590 int idx;
591
592 idx = srcu_read_lock(&kvm->srcu);
593 kvm_arch_flush_shadow_all(kvm);
594 srcu_read_unlock(&kvm->srcu, idx);
595 }
596
597 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
598 .invalidate_range = kvm_mmu_notifier_invalidate_range,
599 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
600 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
601 .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
602 .clear_young = kvm_mmu_notifier_clear_young,
603 .test_young = kvm_mmu_notifier_test_young,
604 .change_pte = kvm_mmu_notifier_change_pte,
605 .release = kvm_mmu_notifier_release,
606 };
607
kvm_init_mmu_notifier(struct kvm * kvm)608 static int kvm_init_mmu_notifier(struct kvm *kvm)
609 {
610 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
611 return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
612 }
613
614 #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
615
kvm_init_mmu_notifier(struct kvm * kvm)616 static int kvm_init_mmu_notifier(struct kvm *kvm)
617 {
618 return 0;
619 }
620
621 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
622
kvm_alloc_memslots(void)623 static struct kvm_memslots *kvm_alloc_memslots(void)
624 {
625 int i;
626 struct kvm_memslots *slots;
627
628 slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
629 if (!slots)
630 return NULL;
631
632 for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
633 slots->id_to_index[i] = -1;
634
635 return slots;
636 }
637
kvm_destroy_dirty_bitmap(struct kvm_memory_slot * memslot)638 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
639 {
640 if (!memslot->dirty_bitmap)
641 return;
642
643 kvfree(memslot->dirty_bitmap);
644 memslot->dirty_bitmap = NULL;
645 }
646
kvm_free_memslot(struct kvm * kvm,struct kvm_memory_slot * slot)647 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
648 {
649 kvm_destroy_dirty_bitmap(slot);
650
651 kvm_arch_free_memslot(kvm, slot);
652
653 slot->flags = 0;
654 slot->npages = 0;
655 }
656
kvm_free_memslots(struct kvm * kvm,struct kvm_memslots * slots)657 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
658 {
659 struct kvm_memory_slot *memslot;
660
661 if (!slots)
662 return;
663
664 kvm_for_each_memslot(memslot, slots)
665 kvm_free_memslot(kvm, memslot);
666
667 kvfree(slots);
668 }
669
kvm_destroy_vm_debugfs(struct kvm * kvm)670 static void kvm_destroy_vm_debugfs(struct kvm *kvm)
671 {
672 int i;
673
674 if (!kvm->debugfs_dentry)
675 return;
676
677 debugfs_remove_recursive(kvm->debugfs_dentry);
678
679 if (kvm->debugfs_stat_data) {
680 for (i = 0; i < kvm_debugfs_num_entries; i++)
681 kfree(kvm->debugfs_stat_data[i]);
682 kfree(kvm->debugfs_stat_data);
683 }
684 }
685
kvm_create_vm_debugfs(struct kvm * kvm,int fd)686 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
687 {
688 static DEFINE_MUTEX(kvm_debugfs_lock);
689 struct dentry *dent;
690 char dir_name[ITOA_MAX_LEN * 2];
691 struct kvm_stat_data *stat_data;
692 struct kvm_stats_debugfs_item *p;
693
694 if (!debugfs_initialized())
695 return 0;
696
697 snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
698 mutex_lock(&kvm_debugfs_lock);
699 dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
700 if (dent) {
701 pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
702 dput(dent);
703 mutex_unlock(&kvm_debugfs_lock);
704 return 0;
705 }
706 dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
707 mutex_unlock(&kvm_debugfs_lock);
708 if (IS_ERR(dent))
709 return 0;
710
711 kvm->debugfs_dentry = dent;
712 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
713 sizeof(*kvm->debugfs_stat_data),
714 GFP_KERNEL_ACCOUNT);
715 if (!kvm->debugfs_stat_data)
716 return -ENOMEM;
717
718 for (p = debugfs_entries; p->name; p++) {
719 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
720 if (!stat_data)
721 return -ENOMEM;
722
723 stat_data->kvm = kvm;
724 stat_data->dbgfs_item = p;
725 kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
726 debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
727 kvm->debugfs_dentry, stat_data,
728 &stat_fops_per_vm);
729 }
730 return 0;
731 }
732
733 /*
734 * Called after the VM is otherwise initialized, but just before adding it to
735 * the vm_list.
736 */
kvm_arch_post_init_vm(struct kvm * kvm)737 int __weak kvm_arch_post_init_vm(struct kvm *kvm)
738 {
739 return 0;
740 }
741
742 /*
743 * Called just after removing the VM from the vm_list, but before doing any
744 * other destruction.
745 */
kvm_arch_pre_destroy_vm(struct kvm * kvm)746 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
747 {
748 }
749
kvm_create_vm(unsigned long type)750 static struct kvm *kvm_create_vm(unsigned long type)
751 {
752 struct kvm *kvm = kvm_arch_alloc_vm();
753 int r = -ENOMEM;
754 int i;
755
756 if (!kvm)
757 return ERR_PTR(-ENOMEM);
758
759 spin_lock_init(&kvm->mmu_lock);
760 mmgrab(current->mm);
761 kvm->mm = current->mm;
762 kvm_eventfd_init(kvm);
763 mutex_init(&kvm->lock);
764 mutex_init(&kvm->irq_lock);
765 mutex_init(&kvm->slots_lock);
766 INIT_LIST_HEAD(&kvm->devices);
767
768 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
769
770 if (init_srcu_struct(&kvm->srcu))
771 goto out_err_no_srcu;
772 if (init_srcu_struct(&kvm->irq_srcu))
773 goto out_err_no_irq_srcu;
774
775 refcount_set(&kvm->users_count, 1);
776 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
777 struct kvm_memslots *slots = kvm_alloc_memslots();
778
779 if (!slots)
780 goto out_err_no_arch_destroy_vm;
781 /* Generations must be different for each address space. */
782 slots->generation = i;
783 rcu_assign_pointer(kvm->memslots[i], slots);
784 }
785
786 for (i = 0; i < KVM_NR_BUSES; i++) {
787 rcu_assign_pointer(kvm->buses[i],
788 kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
789 if (!kvm->buses[i])
790 goto out_err_no_arch_destroy_vm;
791 }
792
793 kvm->max_halt_poll_ns = halt_poll_ns;
794
795 r = kvm_arch_init_vm(kvm, type);
796 if (r)
797 goto out_err_no_arch_destroy_vm;
798
799 r = hardware_enable_all();
800 if (r)
801 goto out_err_no_disable;
802
803 #ifdef CONFIG_HAVE_KVM_IRQFD
804 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
805 #endif
806
807 r = kvm_init_mmu_notifier(kvm);
808 if (r)
809 goto out_err_no_mmu_notifier;
810
811 r = kvm_arch_post_init_vm(kvm);
812 if (r)
813 goto out_err;
814
815 mutex_lock(&kvm_lock);
816 list_add(&kvm->vm_list, &vm_list);
817 mutex_unlock(&kvm_lock);
818
819 preempt_notifier_inc();
820
821 return kvm;
822
823 out_err:
824 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
825 if (kvm->mmu_notifier.ops)
826 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
827 #endif
828 out_err_no_mmu_notifier:
829 hardware_disable_all();
830 out_err_no_disable:
831 kvm_arch_destroy_vm(kvm);
832 out_err_no_arch_destroy_vm:
833 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
834 for (i = 0; i < KVM_NR_BUSES; i++)
835 kfree(kvm_get_bus(kvm, i));
836 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
837 kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
838 cleanup_srcu_struct(&kvm->irq_srcu);
839 out_err_no_irq_srcu:
840 cleanup_srcu_struct(&kvm->srcu);
841 out_err_no_srcu:
842 kvm_arch_free_vm(kvm);
843 mmdrop(current->mm);
844 return ERR_PTR(r);
845 }
846
kvm_destroy_devices(struct kvm * kvm)847 static void kvm_destroy_devices(struct kvm *kvm)
848 {
849 struct kvm_device *dev, *tmp;
850
851 /*
852 * We do not need to take the kvm->lock here, because nobody else
853 * has a reference to the struct kvm at this point and therefore
854 * cannot access the devices list anyhow.
855 */
856 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
857 list_del(&dev->vm_node);
858 dev->ops->destroy(dev);
859 }
860 }
861
kvm_destroy_vm(struct kvm * kvm)862 static void kvm_destroy_vm(struct kvm *kvm)
863 {
864 int i;
865 struct mm_struct *mm = kvm->mm;
866
867 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
868 kvm_destroy_vm_debugfs(kvm);
869 kvm_arch_sync_events(kvm);
870 mutex_lock(&kvm_lock);
871 list_del(&kvm->vm_list);
872 mutex_unlock(&kvm_lock);
873 kvm_arch_pre_destroy_vm(kvm);
874
875 kvm_free_irq_routing(kvm);
876 for (i = 0; i < KVM_NR_BUSES; i++) {
877 struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
878
879 if (bus)
880 kvm_io_bus_destroy(bus);
881 kvm->buses[i] = NULL;
882 }
883 kvm_coalesced_mmio_free(kvm);
884 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
885 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
886 #else
887 kvm_arch_flush_shadow_all(kvm);
888 #endif
889 kvm_arch_destroy_vm(kvm);
890 kvm_destroy_devices(kvm);
891 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
892 kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
893 cleanup_srcu_struct(&kvm->irq_srcu);
894 cleanup_srcu_struct(&kvm->srcu);
895 kvm_arch_free_vm(kvm);
896 preempt_notifier_dec();
897 hardware_disable_all();
898 mmdrop(mm);
899 }
900
kvm_get_kvm(struct kvm * kvm)901 void kvm_get_kvm(struct kvm *kvm)
902 {
903 refcount_inc(&kvm->users_count);
904 }
905 EXPORT_SYMBOL_GPL(kvm_get_kvm);
906
kvm_put_kvm(struct kvm * kvm)907 void kvm_put_kvm(struct kvm *kvm)
908 {
909 if (refcount_dec_and_test(&kvm->users_count))
910 kvm_destroy_vm(kvm);
911 }
912 EXPORT_SYMBOL_GPL(kvm_put_kvm);
913
914 /*
915 * Used to put a reference that was taken on behalf of an object associated
916 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
917 * of the new file descriptor fails and the reference cannot be transferred to
918 * its final owner. In such cases, the caller is still actively using @kvm and
919 * will fail miserably if the refcount unexpectedly hits zero.
920 */
kvm_put_kvm_no_destroy(struct kvm * kvm)921 void kvm_put_kvm_no_destroy(struct kvm *kvm)
922 {
923 WARN_ON(refcount_dec_and_test(&kvm->users_count));
924 }
925 EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
926
kvm_vm_release(struct inode * inode,struct file * filp)927 static int kvm_vm_release(struct inode *inode, struct file *filp)
928 {
929 struct kvm *kvm = filp->private_data;
930
931 kvm_irqfd_release(kvm);
932
933 kvm_put_kvm(kvm);
934 return 0;
935 }
936
937 /*
938 * Allocation size is twice as large as the actual dirty bitmap size.
939 * See kvm_vm_ioctl_get_dirty_log() why this is needed.
940 */
kvm_alloc_dirty_bitmap(struct kvm_memory_slot * memslot)941 static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
942 {
943 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
944
945 memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
946 if (!memslot->dirty_bitmap)
947 return -ENOMEM;
948
949 return 0;
950 }
951
952 /*
953 * Delete a memslot by decrementing the number of used slots and shifting all
954 * other entries in the array forward one spot.
955 */
kvm_memslot_delete(struct kvm_memslots * slots,struct kvm_memory_slot * memslot)956 static inline void kvm_memslot_delete(struct kvm_memslots *slots,
957 struct kvm_memory_slot *memslot)
958 {
959 struct kvm_memory_slot *mslots = slots->memslots;
960 int i;
961
962 if (WARN_ON(slots->id_to_index[memslot->id] == -1))
963 return;
964
965 slots->used_slots--;
966
967 if (atomic_read(&slots->lru_slot) >= slots->used_slots)
968 atomic_set(&slots->lru_slot, 0);
969
970 for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) {
971 mslots[i] = mslots[i + 1];
972 slots->id_to_index[mslots[i].id] = i;
973 }
974 mslots[i] = *memslot;
975 slots->id_to_index[memslot->id] = -1;
976 }
977
978 /*
979 * "Insert" a new memslot by incrementing the number of used slots. Returns
980 * the new slot's initial index into the memslots array.
981 */
kvm_memslot_insert_back(struct kvm_memslots * slots)982 static inline int kvm_memslot_insert_back(struct kvm_memslots *slots)
983 {
984 return slots->used_slots++;
985 }
986
987 /*
988 * Move a changed memslot backwards in the array by shifting existing slots
989 * with a higher GFN toward the front of the array. Note, the changed memslot
990 * itself is not preserved in the array, i.e. not swapped at this time, only
991 * its new index into the array is tracked. Returns the changed memslot's
992 * current index into the memslots array.
993 */
kvm_memslot_move_backward(struct kvm_memslots * slots,struct kvm_memory_slot * memslot)994 static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
995 struct kvm_memory_slot *memslot)
996 {
997 struct kvm_memory_slot *mslots = slots->memslots;
998 int i;
999
1000 if (WARN_ON_ONCE(slots->id_to_index[memslot->id] == -1) ||
1001 WARN_ON_ONCE(!slots->used_slots))
1002 return -1;
1003
1004 /*
1005 * Move the target memslot backward in the array by shifting existing
1006 * memslots with a higher GFN (than the target memslot) towards the
1007 * front of the array.
1008 */
1009 for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) {
1010 if (memslot->base_gfn > mslots[i + 1].base_gfn)
1011 break;
1012
1013 WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn);
1014
1015 /* Shift the next memslot forward one and update its index. */
1016 mslots[i] = mslots[i + 1];
1017 slots->id_to_index[mslots[i].id] = i;
1018 }
1019 return i;
1020 }
1021
1022 /*
1023 * Move a changed memslot forwards in the array by shifting existing slots with
1024 * a lower GFN toward the back of the array. Note, the changed memslot itself
1025 * is not preserved in the array, i.e. not swapped at this time, only its new
1026 * index into the array is tracked. Returns the changed memslot's final index
1027 * into the memslots array.
1028 */
kvm_memslot_move_forward(struct kvm_memslots * slots,struct kvm_memory_slot * memslot,int start)1029 static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
1030 struct kvm_memory_slot *memslot,
1031 int start)
1032 {
1033 struct kvm_memory_slot *mslots = slots->memslots;
1034 int i;
1035
1036 for (i = start; i > 0; i--) {
1037 if (memslot->base_gfn < mslots[i - 1].base_gfn)
1038 break;
1039
1040 WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn);
1041
1042 /* Shift the next memslot back one and update its index. */
1043 mslots[i] = mslots[i - 1];
1044 slots->id_to_index[mslots[i].id] = i;
1045 }
1046 return i;
1047 }
1048
1049 /*
1050 * Re-sort memslots based on their GFN to account for an added, deleted, or
1051 * moved memslot. Sorting memslots by GFN allows using a binary search during
1052 * memslot lookup.
1053 *
1054 * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! I.e. the entry
1055 * at memslots[0] has the highest GFN.
1056 *
1057 * The sorting algorithm takes advantage of having initially sorted memslots
1058 * and knowing the position of the changed memslot. Sorting is also optimized
1059 * by not swapping the updated memslot and instead only shifting other memslots
1060 * and tracking the new index for the update memslot. Only once its final
1061 * index is known is the updated memslot copied into its position in the array.
1062 *
1063 * - When deleting a memslot, the deleted memslot simply needs to be moved to
1064 * the end of the array.
1065 *
1066 * - When creating a memslot, the algorithm "inserts" the new memslot at the
1067 * end of the array and then it forward to its correct location.
1068 *
1069 * - When moving a memslot, the algorithm first moves the updated memslot
1070 * backward to handle the scenario where the memslot's GFN was changed to a
1071 * lower value. update_memslots() then falls through and runs the same flow
1072 * as creating a memslot to move the memslot forward to handle the scenario
1073 * where its GFN was changed to a higher value.
1074 *
1075 * Note, slots are sorted from highest->lowest instead of lowest->highest for
1076 * historical reasons. Originally, invalid memslots where denoted by having
1077 * GFN=0, thus sorting from highest->lowest naturally sorted invalid memslots
1078 * to the end of the array. The current algorithm uses dedicated logic to
1079 * delete a memslot and thus does not rely on invalid memslots having GFN=0.
1080 *
1081 * The other historical motiviation for highest->lowest was to improve the
1082 * performance of memslot lookup. KVM originally used a linear search starting
1083 * at memslots[0]. On x86, the largest memslot usually has one of the highest,
1084 * if not *the* highest, GFN, as the bulk of the guest's RAM is located in a
1085 * single memslot above the 4gb boundary. As the largest memslot is also the
1086 * most likely to be referenced, sorting it to the front of the array was
1087 * advantageous. The current binary search starts from the middle of the array
1088 * and uses an LRU pointer to improve performance for all memslots and GFNs.
1089 */
update_memslots(struct kvm_memslots * slots,struct kvm_memory_slot * memslot,enum kvm_mr_change change)1090 static void update_memslots(struct kvm_memslots *slots,
1091 struct kvm_memory_slot *memslot,
1092 enum kvm_mr_change change)
1093 {
1094 int i;
1095
1096 if (change == KVM_MR_DELETE) {
1097 kvm_memslot_delete(slots, memslot);
1098 } else {
1099 if (change == KVM_MR_CREATE)
1100 i = kvm_memslot_insert_back(slots);
1101 else
1102 i = kvm_memslot_move_backward(slots, memslot);
1103 i = kvm_memslot_move_forward(slots, memslot, i);
1104
1105 /*
1106 * Copy the memslot to its new position in memslots and update
1107 * its index accordingly.
1108 */
1109 slots->memslots[i] = *memslot;
1110 slots->id_to_index[memslot->id] = i;
1111 }
1112 }
1113
check_memory_region_flags(const struct kvm_userspace_memory_region * mem)1114 static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
1115 {
1116 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
1117
1118 #ifdef __KVM_HAVE_READONLY_MEM
1119 valid_flags |= KVM_MEM_READONLY;
1120 #endif
1121
1122 if (mem->flags & ~valid_flags)
1123 return -EINVAL;
1124
1125 return 0;
1126 }
1127
install_new_memslots(struct kvm * kvm,int as_id,struct kvm_memslots * slots)1128 static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
1129 int as_id, struct kvm_memslots *slots)
1130 {
1131 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
1132 u64 gen = old_memslots->generation;
1133
1134 WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
1135 slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1136
1137 rcu_assign_pointer(kvm->memslots[as_id], slots);
1138 synchronize_srcu_expedited(&kvm->srcu);
1139
1140 /*
1141 * Increment the new memslot generation a second time, dropping the
1142 * update in-progress flag and incrementing the generation based on
1143 * the number of address spaces. This provides a unique and easily
1144 * identifiable generation number while the memslots are in flux.
1145 */
1146 gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1147
1148 /*
1149 * Generations must be unique even across address spaces. We do not need
1150 * a global counter for that, instead the generation space is evenly split
1151 * across address spaces. For example, with two address spaces, address
1152 * space 0 will use generations 0, 2, 4, ... while address space 1 will
1153 * use generations 1, 3, 5, ...
1154 */
1155 gen += KVM_ADDRESS_SPACE_NUM;
1156
1157 kvm_arch_memslots_updated(kvm, gen);
1158
1159 slots->generation = gen;
1160
1161 return old_memslots;
1162 }
1163
1164 /*
1165 * Note, at a minimum, the current number of used slots must be allocated, even
1166 * when deleting a memslot, as we need a complete duplicate of the memslots for
1167 * use when invalidating a memslot prior to deleting/moving the memslot.
1168 */
kvm_dup_memslots(struct kvm_memslots * old,enum kvm_mr_change change)1169 static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
1170 enum kvm_mr_change change)
1171 {
1172 struct kvm_memslots *slots;
1173 size_t old_size, new_size;
1174
1175 old_size = sizeof(struct kvm_memslots) +
1176 (sizeof(struct kvm_memory_slot) * old->used_slots);
1177
1178 if (change == KVM_MR_CREATE)
1179 new_size = old_size + sizeof(struct kvm_memory_slot);
1180 else
1181 new_size = old_size;
1182
1183 slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
1184 if (likely(slots))
1185 memcpy(slots, old, old_size);
1186
1187 return slots;
1188 }
1189
kvm_set_memslot(struct kvm * kvm,const struct kvm_userspace_memory_region * mem,struct kvm_memory_slot * old,struct kvm_memory_slot * new,int as_id,enum kvm_mr_change change)1190 static int kvm_set_memslot(struct kvm *kvm,
1191 const struct kvm_userspace_memory_region *mem,
1192 struct kvm_memory_slot *old,
1193 struct kvm_memory_slot *new, int as_id,
1194 enum kvm_mr_change change)
1195 {
1196 struct kvm_memory_slot *slot;
1197 struct kvm_memslots *slots;
1198 int r;
1199
1200 slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change);
1201 if (!slots)
1202 return -ENOMEM;
1203
1204 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1205 /*
1206 * Note, the INVALID flag needs to be in the appropriate entry
1207 * in the freshly allocated memslots, not in @old or @new.
1208 */
1209 slot = id_to_memslot(slots, old->id);
1210 slot->flags |= KVM_MEMSLOT_INVALID;
1211
1212 /*
1213 * We can re-use the old memslots, the only difference from the
1214 * newly installed memslots is the invalid flag, which will get
1215 * dropped by update_memslots anyway. We'll also revert to the
1216 * old memslots if preparing the new memory region fails.
1217 */
1218 slots = install_new_memslots(kvm, as_id, slots);
1219
1220 /* From this point no new shadow pages pointing to a deleted,
1221 * or moved, memslot will be created.
1222 *
1223 * validation of sp->gfn happens in:
1224 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
1225 * - kvm_is_visible_gfn (mmu_check_root)
1226 */
1227 kvm_arch_flush_shadow_memslot(kvm, slot);
1228 }
1229
1230 r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
1231 if (r)
1232 goto out_slots;
1233
1234 update_memslots(slots, new, change);
1235 slots = install_new_memslots(kvm, as_id, slots);
1236
1237 kvm_arch_commit_memory_region(kvm, mem, old, new, change);
1238
1239 kvfree(slots);
1240 return 0;
1241
1242 out_slots:
1243 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
1244 slots = install_new_memslots(kvm, as_id, slots);
1245 kvfree(slots);
1246 return r;
1247 }
1248
kvm_delete_memslot(struct kvm * kvm,const struct kvm_userspace_memory_region * mem,struct kvm_memory_slot * old,int as_id)1249 static int kvm_delete_memslot(struct kvm *kvm,
1250 const struct kvm_userspace_memory_region *mem,
1251 struct kvm_memory_slot *old, int as_id)
1252 {
1253 struct kvm_memory_slot new;
1254 int r;
1255
1256 if (!old->npages)
1257 return -EINVAL;
1258
1259 memset(&new, 0, sizeof(new));
1260 new.id = old->id;
1261 /*
1262 * This is only for debugging purpose; it should never be referenced
1263 * for a removed memslot.
1264 */
1265 new.as_id = as_id;
1266
1267 r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
1268 if (r)
1269 return r;
1270
1271 kvm_free_memslot(kvm, old);
1272 return 0;
1273 }
1274
1275 /*
1276 * Allocate some memory and give it an address in the guest physical address
1277 * space.
1278 *
1279 * Discontiguous memory is allowed, mostly for framebuffers.
1280 *
1281 * Must be called holding kvm->slots_lock for write.
1282 */
__kvm_set_memory_region(struct kvm * kvm,const struct kvm_userspace_memory_region * mem)1283 int __kvm_set_memory_region(struct kvm *kvm,
1284 const struct kvm_userspace_memory_region *mem)
1285 {
1286 struct kvm_memory_slot old, new;
1287 struct kvm_memory_slot *tmp;
1288 enum kvm_mr_change change;
1289 int as_id, id;
1290 int r;
1291
1292 r = check_memory_region_flags(mem);
1293 if (r)
1294 return r;
1295
1296 as_id = mem->slot >> 16;
1297 id = (u16)mem->slot;
1298
1299 /* General sanity checks */
1300 if (mem->memory_size & (PAGE_SIZE - 1))
1301 return -EINVAL;
1302 if (mem->guest_phys_addr & (PAGE_SIZE - 1))
1303 return -EINVAL;
1304 /* We can read the guest memory with __xxx_user() later on. */
1305 if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
1306 (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
1307 !access_ok((void __user *)(unsigned long)mem->userspace_addr,
1308 mem->memory_size))
1309 return -EINVAL;
1310 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
1311 return -EINVAL;
1312 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
1313 return -EINVAL;
1314
1315 /*
1316 * Make a full copy of the old memslot, the pointer will become stale
1317 * when the memslots are re-sorted by update_memslots(), and the old
1318 * memslot needs to be referenced after calling update_memslots(), e.g.
1319 * to free its resources and for arch specific behavior.
1320 */
1321 tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id);
1322 if (tmp) {
1323 old = *tmp;
1324 tmp = NULL;
1325 } else {
1326 memset(&old, 0, sizeof(old));
1327 old.id = id;
1328 }
1329
1330 if (!mem->memory_size)
1331 return kvm_delete_memslot(kvm, mem, &old, as_id);
1332
1333 new.as_id = as_id;
1334 new.id = id;
1335 new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
1336 new.npages = mem->memory_size >> PAGE_SHIFT;
1337 new.flags = mem->flags;
1338 new.userspace_addr = mem->userspace_addr;
1339
1340 if (new.npages > KVM_MEM_MAX_NR_PAGES)
1341 return -EINVAL;
1342
1343 if (!old.npages) {
1344 change = KVM_MR_CREATE;
1345 new.dirty_bitmap = NULL;
1346 memset(&new.arch, 0, sizeof(new.arch));
1347 } else { /* Modify an existing slot. */
1348 if ((new.userspace_addr != old.userspace_addr) ||
1349 (new.npages != old.npages) ||
1350 ((new.flags ^ old.flags) & KVM_MEM_READONLY))
1351 return -EINVAL;
1352
1353 if (new.base_gfn != old.base_gfn)
1354 change = KVM_MR_MOVE;
1355 else if (new.flags != old.flags)
1356 change = KVM_MR_FLAGS_ONLY;
1357 else /* Nothing to change. */
1358 return 0;
1359
1360 /* Copy dirty_bitmap and arch from the current memslot. */
1361 new.dirty_bitmap = old.dirty_bitmap;
1362 memcpy(&new.arch, &old.arch, sizeof(new.arch));
1363 }
1364
1365 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
1366 /* Check for overlaps */
1367 kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) {
1368 if (tmp->id == id)
1369 continue;
1370 if (!((new.base_gfn + new.npages <= tmp->base_gfn) ||
1371 (new.base_gfn >= tmp->base_gfn + tmp->npages)))
1372 return -EEXIST;
1373 }
1374 }
1375
1376 /* Allocate/free page dirty bitmap as needed */
1377 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
1378 new.dirty_bitmap = NULL;
1379 else if (!new.dirty_bitmap) {
1380 r = kvm_alloc_dirty_bitmap(&new);
1381 if (r)
1382 return r;
1383
1384 if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1385 bitmap_set(new.dirty_bitmap, 0, new.npages);
1386 }
1387
1388 r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
1389 if (r)
1390 goto out_bitmap;
1391
1392 if (old.dirty_bitmap && !new.dirty_bitmap)
1393 kvm_destroy_dirty_bitmap(&old);
1394 return 0;
1395
1396 out_bitmap:
1397 if (new.dirty_bitmap && !old.dirty_bitmap)
1398 kvm_destroy_dirty_bitmap(&new);
1399 return r;
1400 }
1401 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
1402
kvm_set_memory_region(struct kvm * kvm,const struct kvm_userspace_memory_region * mem)1403 int kvm_set_memory_region(struct kvm *kvm,
1404 const struct kvm_userspace_memory_region *mem)
1405 {
1406 int r;
1407
1408 mutex_lock(&kvm->slots_lock);
1409 r = __kvm_set_memory_region(kvm, mem);
1410 mutex_unlock(&kvm->slots_lock);
1411 return r;
1412 }
1413 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
1414
kvm_vm_ioctl_set_memory_region(struct kvm * kvm,struct kvm_userspace_memory_region * mem)1415 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
1416 struct kvm_userspace_memory_region *mem)
1417 {
1418 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
1419 return -EINVAL;
1420
1421 return kvm_set_memory_region(kvm, mem);
1422 }
1423
1424 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
1425 /**
1426 * kvm_get_dirty_log - get a snapshot of dirty pages
1427 * @kvm: pointer to kvm instance
1428 * @log: slot id and address to which we copy the log
1429 * @is_dirty: set to '1' if any dirty pages were found
1430 * @memslot: set to the associated memslot, always valid on success
1431 */
kvm_get_dirty_log(struct kvm * kvm,struct kvm_dirty_log * log,int * is_dirty,struct kvm_memory_slot ** memslot)1432 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
1433 int *is_dirty, struct kvm_memory_slot **memslot)
1434 {
1435 struct kvm_memslots *slots;
1436 int i, as_id, id;
1437 unsigned long n;
1438 unsigned long any = 0;
1439
1440 *memslot = NULL;
1441 *is_dirty = 0;
1442
1443 as_id = log->slot >> 16;
1444 id = (u16)log->slot;
1445 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
1446 return -EINVAL;
1447
1448 slots = __kvm_memslots(kvm, as_id);
1449 *memslot = id_to_memslot(slots, id);
1450 if (!(*memslot) || !(*memslot)->dirty_bitmap)
1451 return -ENOENT;
1452
1453 kvm_arch_sync_dirty_log(kvm, *memslot);
1454
1455 n = kvm_dirty_bitmap_bytes(*memslot);
1456
1457 for (i = 0; !any && i < n/sizeof(long); ++i)
1458 any = (*memslot)->dirty_bitmap[i];
1459
1460 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
1461 return -EFAULT;
1462
1463 if (any)
1464 *is_dirty = 1;
1465 return 0;
1466 }
1467 EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
1468
1469 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
1470 /**
1471 * kvm_get_dirty_log_protect - get a snapshot of dirty pages
1472 * and reenable dirty page tracking for the corresponding pages.
1473 * @kvm: pointer to kvm instance
1474 * @log: slot id and address to which we copy the log
1475 *
1476 * We need to keep it in mind that VCPU threads can write to the bitmap
1477 * concurrently. So, to avoid losing track of dirty pages we keep the
1478 * following order:
1479 *
1480 * 1. Take a snapshot of the bit and clear it if needed.
1481 * 2. Write protect the corresponding page.
1482 * 3. Copy the snapshot to the userspace.
1483 * 4. Upon return caller flushes TLB's if needed.
1484 *
1485 * Between 2 and 4, the guest may write to the page using the remaining TLB
1486 * entry. This is not a problem because the page is reported dirty using
1487 * the snapshot taken before and step 4 ensures that writes done after
1488 * exiting to userspace will be logged for the next call.
1489 *
1490 */
kvm_get_dirty_log_protect(struct kvm * kvm,struct kvm_dirty_log * log)1491 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
1492 {
1493 struct kvm_memslots *slots;
1494 struct kvm_memory_slot *memslot;
1495 int i, as_id, id;
1496 unsigned long n;
1497 unsigned long *dirty_bitmap;
1498 unsigned long *dirty_bitmap_buffer;
1499 bool flush;
1500
1501 as_id = log->slot >> 16;
1502 id = (u16)log->slot;
1503 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
1504 return -EINVAL;
1505
1506 slots = __kvm_memslots(kvm, as_id);
1507 memslot = id_to_memslot(slots, id);
1508 if (!memslot || !memslot->dirty_bitmap)
1509 return -ENOENT;
1510
1511 dirty_bitmap = memslot->dirty_bitmap;
1512
1513 kvm_arch_sync_dirty_log(kvm, memslot);
1514
1515 n = kvm_dirty_bitmap_bytes(memslot);
1516 flush = false;
1517 if (kvm->manual_dirty_log_protect) {
1518 /*
1519 * Unlike kvm_get_dirty_log, we always return false in *flush,
1520 * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There
1521 * is some code duplication between this function and
1522 * kvm_get_dirty_log, but hopefully all architecture
1523 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log
1524 * can be eliminated.
1525 */
1526 dirty_bitmap_buffer = dirty_bitmap;
1527 } else {
1528 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
1529 memset(dirty_bitmap_buffer, 0, n);
1530
1531 spin_lock(&kvm->mmu_lock);
1532 for (i = 0; i < n / sizeof(long); i++) {
1533 unsigned long mask;
1534 gfn_t offset;
1535
1536 if (!dirty_bitmap[i])
1537 continue;
1538
1539 flush = true;
1540 mask = xchg(&dirty_bitmap[i], 0);
1541 dirty_bitmap_buffer[i] = mask;
1542
1543 offset = i * BITS_PER_LONG;
1544 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
1545 offset, mask);
1546 }
1547 spin_unlock(&kvm->mmu_lock);
1548 }
1549
1550 if (flush)
1551 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
1552
1553 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
1554 return -EFAULT;
1555 return 0;
1556 }
1557
1558
1559 /**
1560 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
1561 * @kvm: kvm instance
1562 * @log: slot id and address to which we copy the log
1563 *
1564 * Steps 1-4 below provide general overview of dirty page logging. See
1565 * kvm_get_dirty_log_protect() function description for additional details.
1566 *
1567 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
1568 * always flush the TLB (step 4) even if previous step failed and the dirty
1569 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
1570 * does not preclude user space subsequent dirty log read. Flushing TLB ensures
1571 * writes will be marked dirty for next log read.
1572 *
1573 * 1. Take a snapshot of the bit and clear it if needed.
1574 * 2. Write protect the corresponding page.
1575 * 3. Copy the snapshot to the userspace.
1576 * 4. Flush TLB's if needed.
1577 */
kvm_vm_ioctl_get_dirty_log(struct kvm * kvm,struct kvm_dirty_log * log)1578 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1579 struct kvm_dirty_log *log)
1580 {
1581 int r;
1582
1583 mutex_lock(&kvm->slots_lock);
1584
1585 r = kvm_get_dirty_log_protect(kvm, log);
1586
1587 mutex_unlock(&kvm->slots_lock);
1588 return r;
1589 }
1590
1591 /**
1592 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
1593 * and reenable dirty page tracking for the corresponding pages.
1594 * @kvm: pointer to kvm instance
1595 * @log: slot id and address from which to fetch the bitmap of dirty pages
1596 */
kvm_clear_dirty_log_protect(struct kvm * kvm,struct kvm_clear_dirty_log * log)1597 static int kvm_clear_dirty_log_protect(struct kvm *kvm,
1598 struct kvm_clear_dirty_log *log)
1599 {
1600 struct kvm_memslots *slots;
1601 struct kvm_memory_slot *memslot;
1602 int as_id, id;
1603 gfn_t offset;
1604 unsigned long i, n;
1605 unsigned long *dirty_bitmap;
1606 unsigned long *dirty_bitmap_buffer;
1607 bool flush;
1608
1609 as_id = log->slot >> 16;
1610 id = (u16)log->slot;
1611 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
1612 return -EINVAL;
1613
1614 if (log->first_page & 63)
1615 return -EINVAL;
1616
1617 slots = __kvm_memslots(kvm, as_id);
1618 memslot = id_to_memslot(slots, id);
1619 if (!memslot || !memslot->dirty_bitmap)
1620 return -ENOENT;
1621
1622 dirty_bitmap = memslot->dirty_bitmap;
1623
1624 n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
1625
1626 if (log->first_page > memslot->npages ||
1627 log->num_pages > memslot->npages - log->first_page ||
1628 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
1629 return -EINVAL;
1630
1631 kvm_arch_sync_dirty_log(kvm, memslot);
1632
1633 flush = false;
1634 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
1635 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
1636 return -EFAULT;
1637
1638 spin_lock(&kvm->mmu_lock);
1639 for (offset = log->first_page, i = offset / BITS_PER_LONG,
1640 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
1641 i++, offset += BITS_PER_LONG) {
1642 unsigned long mask = *dirty_bitmap_buffer++;
1643 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
1644 if (!mask)
1645 continue;
1646
1647 mask &= atomic_long_fetch_andnot(mask, p);
1648
1649 /*
1650 * mask contains the bits that really have been cleared. This
1651 * never includes any bits beyond the length of the memslot (if
1652 * the length is not aligned to 64 pages), therefore it is not
1653 * a problem if userspace sets them in log->dirty_bitmap.
1654 */
1655 if (mask) {
1656 flush = true;
1657 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
1658 offset, mask);
1659 }
1660 }
1661 spin_unlock(&kvm->mmu_lock);
1662
1663 if (flush)
1664 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
1665
1666 return 0;
1667 }
1668
kvm_vm_ioctl_clear_dirty_log(struct kvm * kvm,struct kvm_clear_dirty_log * log)1669 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
1670 struct kvm_clear_dirty_log *log)
1671 {
1672 int r;
1673
1674 mutex_lock(&kvm->slots_lock);
1675
1676 r = kvm_clear_dirty_log_protect(kvm, log);
1677
1678 mutex_unlock(&kvm->slots_lock);
1679 return r;
1680 }
1681 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
1682
gfn_to_memslot(struct kvm * kvm,gfn_t gfn)1683 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
1684 {
1685 return __gfn_to_memslot(kvm_memslots(kvm), gfn);
1686 }
1687 EXPORT_SYMBOL_GPL(gfn_to_memslot);
1688
kvm_vcpu_gfn_to_memslot(struct kvm_vcpu * vcpu,gfn_t gfn)1689 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
1690 {
1691 return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
1692 }
1693 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot);
1694
kvm_is_visible_gfn(struct kvm * kvm,gfn_t gfn)1695 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
1696 {
1697 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
1698
1699 return kvm_is_visible_memslot(memslot);
1700 }
1701 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
1702
kvm_vcpu_is_visible_gfn(struct kvm_vcpu * vcpu,gfn_t gfn)1703 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
1704 {
1705 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1706
1707 return kvm_is_visible_memslot(memslot);
1708 }
1709 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
1710
kvm_host_page_size(struct kvm_vcpu * vcpu,gfn_t gfn)1711 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
1712 {
1713 struct vm_area_struct *vma;
1714 unsigned long addr, size;
1715
1716 size = PAGE_SIZE;
1717
1718 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
1719 if (kvm_is_error_hva(addr))
1720 return PAGE_SIZE;
1721
1722 mmap_read_lock(current->mm);
1723 vma = find_vma(current->mm, addr);
1724 if (!vma)
1725 goto out;
1726
1727 size = vma_kernel_pagesize(vma);
1728
1729 out:
1730 mmap_read_unlock(current->mm);
1731
1732 return size;
1733 }
1734
1735 static bool memslot_is_readonly(struct kvm_memory_slot *slot)
1736 {
1737 return slot->flags & KVM_MEM_READONLY;
1738 }
1739
1740 static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
1741 gfn_t *nr_pages, bool write)
1742 {
1743 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
1744 return KVM_HVA_ERR_BAD;
1745
1746 if (memslot_is_readonly(slot) && write)
1747 return KVM_HVA_ERR_RO_BAD;
1748
1749 if (nr_pages)
1750 *nr_pages = slot->npages - (gfn - slot->base_gfn);
1751
1752 return __gfn_to_hva_memslot(slot, gfn);
1753 }
1754
1755 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
1756 gfn_t *nr_pages)
1757 {
1758 return __gfn_to_hva_many(slot, gfn, nr_pages, true);
1759 }
1760
1761 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
1762 gfn_t gfn)
1763 {
1764 return gfn_to_hva_many(slot, gfn, NULL);
1765 }
1766 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
1767
1768 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
1769 {
1770 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
1771 }
1772 EXPORT_SYMBOL_GPL(gfn_to_hva);
1773
1774 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
1775 {
1776 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
1777 }
1778 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
1779
1780 /*
1781 * Return the hva of a @gfn and the R/W attribute if possible.
1782 *
1783 * @slot: the kvm_memory_slot which contains @gfn
1784 * @gfn: the gfn to be translated
1785 * @writable: used to return the read/write attribute of the @slot if the hva
1786 * is valid and @writable is not NULL
1787 */
1788 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
1789 gfn_t gfn, bool *writable)
1790 {
1791 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
1792
1793 if (!kvm_is_error_hva(hva) && writable)
1794 *writable = !memslot_is_readonly(slot);
1795
1796 return hva;
1797 }
1798
1799 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
1800 {
1801 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
1802
1803 return gfn_to_hva_memslot_prot(slot, gfn, writable);
1804 }
1805
1806 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
1807 {
1808 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1809
1810 return gfn_to_hva_memslot_prot(slot, gfn, writable);
1811 }
1812
1813 static inline int check_user_page_hwpoison(unsigned long addr)
1814 {
1815 int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
1816
1817 rc = get_user_pages(addr, 1, flags, NULL, NULL);
1818 return rc == -EHWPOISON;
1819 }
1820
1821 /*
1822 * The fast path to get the writable pfn which will be stored in @pfn;
1823 * true indicates success, otherwise false is returned. It's also the
1824 * only path that is tried when we may be in atomic context.
1825 */
1826 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
1827 bool *writable, kvm_pfn_t *pfn)
1828 {
1829 struct page *page[1];
1830
1831 /*
1832 * Fast pin a writable pfn only if it is a write fault request
1833 * or the caller allows to map a writable pfn for a read fault
1834 * request.
1835 */
1836 if (!(write_fault || writable))
1837 return false;
1838
1839 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
1840 *pfn = page_to_pfn(page[0]);
1841
1842 if (writable)
1843 *writable = true;
1844 return true;
1845 }
1846
1847 return false;
1848 }
1849
1850 /*
1851 * The slow path to get the pfn of the specified host virtual address;
1852 * 1 indicates success, -errno is returned if error is detected.
1853 */
1854 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
1855 bool *writable, kvm_pfn_t *pfn)
1856 {
1857 unsigned int flags = FOLL_HWPOISON;
1858 struct page *page;
1859 int npages = 0;
1860
1861 might_sleep();
1862
1863 if (writable)
1864 *writable = write_fault;
1865
1866 if (write_fault)
1867 flags |= FOLL_WRITE;
1868 if (async)
1869 flags |= FOLL_NOWAIT;
1870
1871 npages = get_user_pages_unlocked(addr, 1, &page, flags);
1872 if (npages != 1)
1873 return npages;
1874
1875 /* map read fault as writable if possible */
1876 if (unlikely(!write_fault) && writable) {
1877 struct page *wpage;
1878
1879 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
1880 *writable = true;
1881 put_page(page);
1882 page = wpage;
1883 }
1884 }
1885 *pfn = page_to_pfn(page);
1886 return npages;
1887 }
1888
1889 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
1890 {
1891 if (unlikely(!(vma->vm_flags & VM_READ)))
1892 return false;
1893
1894 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
1895 return false;
1896
1897 return true;
1898 }
1899
1900 static int kvm_try_get_pfn(kvm_pfn_t pfn)
1901 {
1902 if (kvm_is_reserved_pfn(pfn))
1903 return 1;
1904 return get_page_unless_zero(pfn_to_page(pfn));
1905 }
1906
1907 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
1908 unsigned long addr, bool *async,
1909 bool write_fault, bool *writable,
1910 kvm_pfn_t *p_pfn)
1911 {
1912 kvm_pfn_t pfn;
1913 pte_t *ptep;
1914 spinlock_t *ptl;
1915 int r;
1916
1917 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
1918 if (r) {
1919 /*
1920 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
1921 * not call the fault handler, so do it here.
1922 */
1923 bool unlocked = false;
1924 r = fixup_user_fault(current->mm, addr,
1925 (write_fault ? FAULT_FLAG_WRITE : 0),
1926 &unlocked);
1927 if (unlocked)
1928 return -EAGAIN;
1929 if (r)
1930 return r;
1931
1932 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
1933 if (r)
1934 return r;
1935 }
1936
1937 if (write_fault && !pte_write(*ptep)) {
1938 pfn = KVM_PFN_ERR_RO_FAULT;
1939 goto out;
1940 }
1941
1942 if (writable)
1943 *writable = pte_write(*ptep);
1944 pfn = pte_pfn(*ptep);
1945
1946 /*
1947 * Get a reference here because callers of *hva_to_pfn* and
1948 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
1949 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP
1950 * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will
1951 * simply do nothing for reserved pfns.
1952 *
1953 * Whoever called remap_pfn_range is also going to call e.g.
1954 * unmap_mapping_range before the underlying pages are freed,
1955 * causing a call to our MMU notifier.
1956 *
1957 * Certain IO or PFNMAP mappings can be backed with valid
1958 * struct pages, but be allocated without refcounting e.g.,
1959 * tail pages of non-compound higher order allocations, which
1960 * would then underflow the refcount when the caller does the
1961 * required put_page. Don't allow those pages here.
1962 */
1963 if (!kvm_try_get_pfn(pfn))
1964 r = -EFAULT;
1965
1966 out:
1967 pte_unmap_unlock(ptep, ptl);
1968 *p_pfn = pfn;
1969
1970 return r;
1971 }
1972
1973 /*
1974 * Pin guest page in memory and return its pfn.
1975 * @addr: host virtual address which maps memory to the guest
1976 * @atomic: whether the caller is in atomic context, so this function must not sleep
1977 * @async: whether to return early and complete the fault asynchronously
1978 * if the host page is not yet in memory
1979 * @write_fault: whether we should get a writable host page
1980 * @writable: whether mapping a writable host page is allowed for !@write_fault
1981 *
1982 * The function will map a writable host page for these two cases:
1983 * 1): @write_fault = true
1984 * 2): @write_fault = false && @writable, @writable will tell the caller
1985 * whether the mapping is writable.
1986 */
1987 static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
1988 bool write_fault, bool *writable)
1989 {
1990 struct vm_area_struct *vma;
1991 kvm_pfn_t pfn = 0;
1992 int npages, r;
1993
1994 /* we can do it either atomically or asynchronously, not both */
1995 BUG_ON(atomic && async);
1996
1997 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
1998 return pfn;
1999
2000 if (atomic)
2001 return KVM_PFN_ERR_FAULT;
2002
2003 npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
2004 if (npages == 1)
2005 return pfn;
2006
2007 mmap_read_lock(current->mm);
2008 if (npages == -EHWPOISON ||
2009 (!async && check_user_page_hwpoison(addr))) {
2010 pfn = KVM_PFN_ERR_HWPOISON;
2011 goto exit;
2012 }
2013
2014 retry:
2015 vma = find_vma_intersection(current->mm, addr, addr + 1);
2016
2017 if (vma == NULL)
2018 pfn = KVM_PFN_ERR_FAULT;
2019 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
2020 r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
2021 if (r == -EAGAIN)
2022 goto retry;
2023 if (r < 0)
2024 pfn = KVM_PFN_ERR_FAULT;
2025 } else {
2026 if (async && vma_is_valid(vma, write_fault))
2027 *async = true;
2028 pfn = KVM_PFN_ERR_FAULT;
2029 }
2030 exit:
2031 mmap_read_unlock(current->mm);
2032 return pfn;
2033 }
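/*
 * How the attempts above combine, as a rough sketch:
 *
 *   atomic == true          only hva_to_pfn_fast() is tried; on failure the
 *                           caller gets KVM_PFN_ERR_FAULT and must retry
 *                           from a sleepable context.
 *   write_fault == true     the pfn is always pinned writable.
 *   write_fault == false    a writable pin is attempted opportunistically
 *   && writable != NULL     and *writable reports what was actually granted.
 *   async != NULL           the slow path uses FOLL_NOWAIT, and *async is
 *                           set when the fault could complete asynchronously.
 */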
2034
2035 kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
2036 bool atomic, bool *async, bool write_fault,
2037 bool *writable)
2038 {
2039 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
2040
2041 if (addr == KVM_HVA_ERR_RO_BAD) {
2042 if (writable)
2043 *writable = false;
2044 return KVM_PFN_ERR_RO_FAULT;
2045 }
2046
2047 if (kvm_is_error_hva(addr)) {
2048 if (writable)
2049 *writable = false;
2050 return KVM_PFN_NOSLOT;
2051 }
2052
2053 /* Do not map writable pfn in the readonly memslot. */
2054 if (writable && memslot_is_readonly(slot)) {
2055 *writable = false;
2056 writable = NULL;
2057 }
2058
2059 return hva_to_pfn(addr, atomic, async, write_fault,
2060 writable);
2061 }
2062 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
2063
2064 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
2065 bool *writable)
2066 {
2067 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
2068 write_fault, writable);
2069 }
2070 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
2071
2072 kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
2073 {
2074 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
2075 }
2076 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
2077
2078 kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
2079 {
2080 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
2081 }
2082 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
2083
2084 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
2085 {
2086 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2087 }
2088 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
2089
2090 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
2091 {
2092 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
2093 }
2094 EXPORT_SYMBOL_GPL(gfn_to_pfn);
2095
2096 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2097 {
2098 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2099 }
2100 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
2101
2102 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2103 struct page **pages, int nr_pages)
2104 {
2105 unsigned long addr;
2106 gfn_t entry = 0;
2107
2108 addr = gfn_to_hva_many(slot, gfn, &entry);
2109 if (kvm_is_error_hva(addr))
2110 return -1;
2111
2112 if (entry < nr_pages)
2113 return 0;
2114
2115 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
2116 }
2117 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
2118
2119 static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
2120 {
2121 if (is_error_noslot_pfn(pfn))
2122 return KVM_ERR_PTR_BAD_PAGE;
2123
2124 if (kvm_is_reserved_pfn(pfn)) {
2125 WARN_ON(1);
2126 return KVM_ERR_PTR_BAD_PAGE;
2127 }
2128
2129 return pfn_to_page(pfn);
2130 }
2131
2132 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
2133 {
2134 kvm_pfn_t pfn;
2135
2136 pfn = gfn_to_pfn(kvm, gfn);
2137
2138 return kvm_pfn_to_page(pfn);
2139 }
2140 EXPORT_SYMBOL_GPL(gfn_to_page);
2141
2142 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
2143 {
2144 if (pfn == 0)
2145 return;
2146
2147 if (cache)
2148 cache->pfn = cache->gfn = 0;
2149
2150 if (dirty)
2151 kvm_release_pfn_dirty(pfn);
2152 else
2153 kvm_release_pfn_clean(pfn);
2154 }
2155
2156 static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
2157 struct gfn_to_pfn_cache *cache, u64 gen)
2158 {
2159 kvm_release_pfn(cache->pfn, cache->dirty, cache);
2160
2161 cache->pfn = gfn_to_pfn_memslot(slot, gfn);
2162 cache->gfn = gfn;
2163 cache->dirty = false;
2164 cache->generation = gen;
2165 }
2166
2167 static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
2168 struct kvm_host_map *map,
2169 struct gfn_to_pfn_cache *cache,
2170 bool atomic)
2171 {
2172 kvm_pfn_t pfn;
2173 void *hva = NULL;
2174 struct page *page = KVM_UNMAPPED_PAGE;
2175 struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
2176 u64 gen = slots->generation;
2177
2178 if (!map)
2179 return -EINVAL;
2180
2181 if (cache) {
2182 if (!cache->pfn || cache->gfn != gfn ||
2183 cache->generation != gen) {
2184 if (atomic)
2185 return -EAGAIN;
2186 kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
2187 }
2188 pfn = cache->pfn;
2189 } else {
2190 if (atomic)
2191 return -EAGAIN;
2192 pfn = gfn_to_pfn_memslot(slot, gfn);
2193 }
2194 if (is_error_noslot_pfn(pfn))
2195 return -EINVAL;
2196
2197 if (pfn_valid(pfn)) {
2198 page = pfn_to_page(pfn);
2199 if (atomic)
2200 hva = kmap_atomic(page);
2201 else
2202 hva = kmap(page);
2203 #ifdef CONFIG_HAS_IOMEM
2204 } else if (!atomic) {
2205 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
2206 } else {
2207 return -EINVAL;
2208 #endif
2209 }
2210
2211 if (!hva)
2212 return -EFAULT;
2213
2214 map->page = page;
2215 map->hva = hva;
2216 map->pfn = pfn;
2217 map->gfn = gfn;
2218
2219 return 0;
2220 }
2221
2222 int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
2223 struct gfn_to_pfn_cache *cache, bool atomic)
2224 {
2225 return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
2226 cache, atomic);
2227 }
2228 EXPORT_SYMBOL_GPL(kvm_map_gfn);
2229
2230 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
2231 {
2232 return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
2233 NULL, false);
2234 }
2235 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
2236
2237 static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
2238 struct kvm_host_map *map,
2239 struct gfn_to_pfn_cache *cache,
2240 bool dirty, bool atomic)
2241 {
2242 if (!map)
2243 return;
2244
2245 if (!map->hva)
2246 return;
2247
2248 if (map->page != KVM_UNMAPPED_PAGE) {
2249 if (atomic)
2250 kunmap_atomic(map->hva);
2251 else
2252 kunmap(map->page);
2253 }
2254 #ifdef CONFIG_HAS_IOMEM
2255 else if (!atomic)
2256 memunmap(map->hva);
2257 else
2258 WARN_ONCE(1, "Unexpected unmapping in atomic context");
2259 #endif
2260
2261 if (dirty)
2262 mark_page_dirty_in_slot(memslot, map->gfn);
2263
2264 if (cache)
2265 cache->dirty |= dirty;
2266 else
2267 kvm_release_pfn(map->pfn, dirty, NULL);
2268
2269 map->hva = NULL;
2270 map->page = NULL;
2271 }
2272
2273 int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
2274 struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
2275 {
2276 __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
2277 cache, dirty, atomic);
2278 return 0;
2279 }
2280 EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
2281
2282 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
2283 {
2284 __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
2285 dirty, false);
2286 }
2287 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
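/*
 * Typical use of the map/unmap pair above (an illustrative sketch; "gpa",
 * "buf" and "len" are caller-provided):
 *
 *	struct kvm_host_map map;
 *
 *	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
 *		return -EFAULT;
 *	memcpy(map.hva + offset_in_page(gpa), buf, len);
 *	kvm_vcpu_unmap(vcpu, &map, true);	(dirty == true after a write)
 *
 * Unlike an open-coded gfn_to_pfn() + kmap(), this also covers pfns without
 * a struct page (e.g. PFNMAP/MMIO regions) via memremap() when
 * CONFIG_HAS_IOMEM is enabled.
 */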
2288
2289 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2290 {
2291 kvm_pfn_t pfn;
2292
2293 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
2294
2295 return kvm_pfn_to_page(pfn);
2296 }
2297 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);
2298
2299 void kvm_release_page_clean(struct page *page)
2300 {
2301 WARN_ON(is_error_page(page));
2302
2303 kvm_release_pfn_clean(page_to_pfn(page));
2304 }
2305 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
2306
2307 void kvm_release_pfn_clean(kvm_pfn_t pfn)
2308 {
2309 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
2310 put_page(pfn_to_page(pfn));
2311 }
2312 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
2313
2314 void kvm_release_page_dirty(struct page *page)
2315 {
2316 WARN_ON(is_error_page(page));
2317
2318 kvm_release_pfn_dirty(page_to_pfn(page));
2319 }
2320 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
2321
2322 void kvm_release_pfn_dirty(kvm_pfn_t pfn)
2323 {
2324 kvm_set_pfn_dirty(pfn);
2325 kvm_release_pfn_clean(pfn);
2326 }
2327 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
2328
2329 void kvm_set_pfn_dirty(kvm_pfn_t pfn)
2330 {
2331 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
2332 SetPageDirty(pfn_to_page(pfn));
2333 }
2334 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
2335
2336 void kvm_set_pfn_accessed(kvm_pfn_t pfn)
2337 {
2338 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
2339 mark_page_accessed(pfn_to_page(pfn));
2340 }
2341 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
2342
2343 void kvm_get_pfn(kvm_pfn_t pfn)
2344 {
2345 if (!kvm_is_reserved_pfn(pfn))
2346 get_page(pfn_to_page(pfn));
2347 }
2348 EXPORT_SYMBOL_GPL(kvm_get_pfn);
2349
2350 static int next_segment(unsigned long len, int offset)
2351 {
2352 if (len > PAGE_SIZE - offset)
2353 return PAGE_SIZE - offset;
2354 else
2355 return len;
2356 }
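/*
 * Worked example for next_segment(), assuming PAGE_SIZE == 4096: a
 * 5000-byte access that starts at page offset 3000 is split by the loops
 * below into 1096 bytes (the remainder of the first page) and then 3904
 * bytes, since @offset is reset to 0 after the first iteration.
 */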
2357
2358 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
2359 void *data, int offset, int len)
2360 {
2361 int r;
2362 unsigned long addr;
2363
2364 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
2365 if (kvm_is_error_hva(addr))
2366 return -EFAULT;
2367 r = __copy_from_user(data, (void __user *)addr + offset, len);
2368 if (r)
2369 return -EFAULT;
2370 return 0;
2371 }
2372
2373 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
2374 int len)
2375 {
2376 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2377
2378 return __kvm_read_guest_page(slot, gfn, data, offset, len);
2379 }
2380 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
2381
2382 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
2383 int offset, int len)
2384 {
2385 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2386
2387 return __kvm_read_guest_page(slot, gfn, data, offset, len);
2388 }
2389 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
2390
2391 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
2392 {
2393 gfn_t gfn = gpa >> PAGE_SHIFT;
2394 int seg;
2395 int offset = offset_in_page(gpa);
2396 int ret;
2397
2398 while ((seg = next_segment(len, offset)) != 0) {
2399 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
2400 if (ret < 0)
2401 return ret;
2402 offset = 0;
2403 len -= seg;
2404 data += seg;
2405 ++gfn;
2406 }
2407 return 0;
2408 }
2409 EXPORT_SYMBOL_GPL(kvm_read_guest);
2410
2411 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
2412 {
2413 gfn_t gfn = gpa >> PAGE_SHIFT;
2414 int seg;
2415 int offset = offset_in_page(gpa);
2416 int ret;
2417
2418 while ((seg = next_segment(len, offset)) != 0) {
2419 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
2420 if (ret < 0)
2421 return ret;
2422 offset = 0;
2423 len -= seg;
2424 data += seg;
2425 ++gfn;
2426 }
2427 return 0;
2428 }
2429 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
2430
2431 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2432 void *data, int offset, unsigned long len)
2433 {
2434 int r;
2435 unsigned long addr;
2436
2437 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
2438 if (kvm_is_error_hva(addr))
2439 return -EFAULT;
2440 pagefault_disable();
2441 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
2442 pagefault_enable();
2443 if (r)
2444 return -EFAULT;
2445 return 0;
2446 }
2447
2448 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
2449 void *data, unsigned long len)
2450 {
2451 gfn_t gfn = gpa >> PAGE_SHIFT;
2452 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2453 int offset = offset_in_page(gpa);
2454
2455 return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
2456 }
2457 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
2458
2459 static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
2460 const void *data, int offset, int len)
2461 {
2462 int r;
2463 unsigned long addr;
2464
2465 addr = gfn_to_hva_memslot(memslot, gfn);
2466 if (kvm_is_error_hva(addr))
2467 return -EFAULT;
2468 r = __copy_to_user((void __user *)addr + offset, data, len);
2469 if (r)
2470 return -EFAULT;
2471 mark_page_dirty_in_slot(memslot, gfn);
2472 return 0;
2473 }
2474
2475 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
2476 const void *data, int offset, int len)
2477 {
2478 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2479
2480 return __kvm_write_guest_page(slot, gfn, data, offset, len);
2481 }
2482 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
2483
2484 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
2485 const void *data, int offset, int len)
2486 {
2487 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2488
2489 return __kvm_write_guest_page(slot, gfn, data, offset, len);
2490 }
2491 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
2492
2493 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
2494 unsigned long len)
2495 {
2496 gfn_t gfn = gpa >> PAGE_SHIFT;
2497 int seg;
2498 int offset = offset_in_page(gpa);
2499 int ret;
2500
2501 while ((seg = next_segment(len, offset)) != 0) {
2502 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
2503 if (ret < 0)
2504 return ret;
2505 offset = 0;
2506 len -= seg;
2507 data += seg;
2508 ++gfn;
2509 }
2510 return 0;
2511 }
2512 EXPORT_SYMBOL_GPL(kvm_write_guest);
2513
2514 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
2515 unsigned long len)
2516 {
2517 gfn_t gfn = gpa >> PAGE_SHIFT;
2518 int seg;
2519 int offset = offset_in_page(gpa);
2520 int ret;
2521
2522 while ((seg = next_segment(len, offset)) != 0) {
2523 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
2524 if (ret < 0)
2525 return ret;
2526 offset = 0;
2527 len -= seg;
2528 data += seg;
2529 ++gfn;
2530 }
2531 return 0;
2532 }
2533 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
2534
2535 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
2536 struct gfn_to_hva_cache *ghc,
2537 gpa_t gpa, unsigned long len)
2538 {
2539 int offset = offset_in_page(gpa);
2540 gfn_t start_gfn = gpa >> PAGE_SHIFT;
2541 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
2542 gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
2543 gfn_t nr_pages_avail;
2544
2545 /* Update ghc->generation before performing any error checks. */
2546 ghc->generation = slots->generation;
2547
2548 if (start_gfn > end_gfn) {
2549 ghc->hva = KVM_HVA_ERR_BAD;
2550 return -EINVAL;
2551 }
2552
2553 /*
2554 * If the requested region crosses two memslots, we still
2555 * verify that the entire region is valid here.
2556 */
2557 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
2558 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
2559 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
2560 &nr_pages_avail);
2561 if (kvm_is_error_hva(ghc->hva))
2562 return -EFAULT;
2563 }
2564
2565 /* Use the slow path for cross page reads and writes. */
2566 if (nr_pages_needed == 1)
2567 ghc->hva += offset;
2568 else
2569 ghc->memslot = NULL;
2570
2571 ghc->gpa = gpa;
2572 ghc->len = len;
2573 return 0;
2574 }
2575
2576 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2577 gpa_t gpa, unsigned long len)
2578 {
2579 struct kvm_memslots *slots = kvm_memslots(kvm);
2580 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
2581 }
2582 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
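/*
 * Typical cached-access pattern (an illustrative sketch; "gpa" and "st"
 * stand for some fixed guest area, e.g. a steal-time record): the
 * translation is set up once and then reused by the cached helpers below,
 * which re-initialize themselves if slots->generation has changed.
 *
 *	struct gfn_to_hva_cache ghc;
 *
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(st)))
 *		return -EFAULT;
 *	...
 *	kvm_write_guest_cached(kvm, &ghc, &st, sizeof(st));
 */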
2583
2584 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2585 void *data, unsigned int offset,
2586 unsigned long len)
2587 {
2588 struct kvm_memslots *slots = kvm_memslots(kvm);
2589 int r;
2590 gpa_t gpa = ghc->gpa + offset;
2591
2592 BUG_ON(len + offset > ghc->len);
2593
2594 if (slots->generation != ghc->generation) {
2595 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
2596 return -EFAULT;
2597 }
2598
2599 if (kvm_is_error_hva(ghc->hva))
2600 return -EFAULT;
2601
2602 if (unlikely(!ghc->memslot))
2603 return kvm_write_guest(kvm, gpa, data, len);
2604
2605 r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
2606 if (r)
2607 return -EFAULT;
2608 mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT);
2609
2610 return 0;
2611 }
2612 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
2613
2614 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2615 void *data, unsigned long len)
2616 {
2617 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
2618 }
2619 EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
2620
2621 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2622 void *data, unsigned int offset,
2623 unsigned long len)
2624 {
2625 struct kvm_memslots *slots = kvm_memslots(kvm);
2626 int r;
2627 gpa_t gpa = ghc->gpa + offset;
2628
2629 BUG_ON(len + offset > ghc->len);
2630
2631 if (slots->generation != ghc->generation) {
2632 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
2633 return -EFAULT;
2634 }
2635
2636 if (kvm_is_error_hva(ghc->hva))
2637 return -EFAULT;
2638
2639 if (unlikely(!ghc->memslot))
2640 return kvm_read_guest(kvm, gpa, data, len);
2641
2642 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
2643 if (r)
2644 return -EFAULT;
2645
2646 return 0;
2647 }
2648 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
2649
2650 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2651 void *data, unsigned long len)
2652 {
2653 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
2654 }
2655 EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
2656
2657 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
2658 {
2659 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
2660
2661 return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
2662 }
2663 EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
2664
2665 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
2666 {
2667 gfn_t gfn = gpa >> PAGE_SHIFT;
2668 int seg;
2669 int offset = offset_in_page(gpa);
2670 int ret;
2671
2672 while ((seg = next_segment(len, offset)) != 0) {
2673 ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
2674 if (ret < 0)
2675 return ret;
2676 offset = 0;
2677 len -= seg;
2678 ++gfn;
2679 }
2680 return 0;
2681 }
2682 EXPORT_SYMBOL_GPL(kvm_clear_guest);
2683
2684 void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn)
2685 {
2686 if (memslot && memslot->dirty_bitmap) {
2687 unsigned long rel_gfn = gfn - memslot->base_gfn;
2688
2689 set_bit_le(rel_gfn, memslot->dirty_bitmap);
2690 }
2691 }
2692 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
2693
2694 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
2695 {
2696 struct kvm_memory_slot *memslot;
2697
2698 memslot = gfn_to_memslot(kvm, gfn);
2699 mark_page_dirty_in_slot(memslot, gfn);
2700 }
2701 EXPORT_SYMBOL_GPL(mark_page_dirty);
2702
2703 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
2704 {
2705 struct kvm_memory_slot *memslot;
2706
2707 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2708 mark_page_dirty_in_slot(memslot, gfn);
2709 }
2710 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
2711
2712 void kvm_sigset_activate(struct kvm_vcpu *vcpu)
2713 {
2714 if (!vcpu->sigset_active)
2715 return;
2716
2717 /*
2718 * This does a lockless modification of ->real_blocked, which is fine
2719 * because only current can change ->real_blocked and all readers of
2720 * ->real_blocked don't care as long as ->real_blocked is always a subset
2721 * of ->blocked.
2722 */
2723 sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
2724 }
2725
2726 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
2727 {
2728 if (!vcpu->sigset_active)
2729 return;
2730
2731 sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
2732 sigemptyset(&current->real_blocked);
2733 }
2734
2735 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
2736 {
2737 unsigned int old, val, grow, grow_start;
2738
2739 old = val = vcpu->halt_poll_ns;
2740 grow_start = READ_ONCE(halt_poll_ns_grow_start);
2741 grow = READ_ONCE(halt_poll_ns_grow);
2742 if (!grow)
2743 goto out;
2744
2745 val *= grow;
2746 if (val < grow_start)
2747 val = grow_start;
2748
2749 if (val > vcpu->kvm->max_halt_poll_ns)
2750 val = vcpu->kvm->max_halt_poll_ns;
2751
2752 vcpu->halt_poll_ns = val;
2753 out:
2754 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
2755 }
2756
2757 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
2758 {
2759 unsigned int old, val, shrink, grow_start;
2760
2761 old = val = vcpu->halt_poll_ns;
2762 shrink = READ_ONCE(halt_poll_ns_shrink);
2763 grow_start = READ_ONCE(halt_poll_ns_grow_start);
2764 if (shrink == 0)
2765 val = 0;
2766 else
2767 val /= shrink;
2768
2769 if (val < grow_start)
2770 val = 0;
2771
2772 vcpu->halt_poll_ns = val;
2773 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
2774 }
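/*
 * Numeric example with the default module parameters (grow == 2,
 * grow_start == 10000, shrink == 0): successive grows take halt_poll_ns
 * from 0 to 10000, 20000, 40000, ... ns, capped at kvm->max_halt_poll_ns,
 * while a single shrink resets it straight to 0 because shrink == 0.
 * kvm_vcpu_block() below decides when each adjustment is applied.
 */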
2775
2776 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
2777 {
2778 int ret = -EINTR;
2779 int idx = srcu_read_lock(&vcpu->kvm->srcu);
2780
2781 if (kvm_arch_vcpu_runnable(vcpu)) {
2782 kvm_make_request(KVM_REQ_UNHALT, vcpu);
2783 goto out;
2784 }
2785 if (kvm_cpu_has_pending_timer(vcpu))
2786 goto out;
2787 if (signal_pending(current))
2788 goto out;
2789
2790 ret = 0;
2791 out:
2792 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2793 return ret;
2794 }
2795
2796 static inline void
2797 update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited)
2798 {
2799 if (waited)
2800 vcpu->stat.halt_poll_fail_ns += poll_ns;
2801 else
2802 vcpu->stat.halt_poll_success_ns += poll_ns;
2803 }
2804
2805 /*
2806 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
2807 */
2808 void kvm_vcpu_block(struct kvm_vcpu *vcpu)
2809 {
2810 ktime_t start, cur, poll_end;
2811 bool waited = false;
2812 u64 block_ns;
2813
2814 kvm_arch_vcpu_blocking(vcpu);
2815
2816 start = cur = poll_end = ktime_get();
2817 if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
2818 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
2819
2820 ++vcpu->stat.halt_attempted_poll;
2821 do {
2822 /*
2823 * This sets KVM_REQ_UNHALT if an interrupt
2824 * arrives.
2825 */
2826 if (kvm_vcpu_check_block(vcpu) < 0) {
2827 ++vcpu->stat.halt_successful_poll;
2828 if (!vcpu_valid_wakeup(vcpu))
2829 ++vcpu->stat.halt_poll_invalid;
2830 goto out;
2831 }
2832 poll_end = cur = ktime_get();
2833 } while (single_task_running() && !need_resched() &&
2834 ktime_before(cur, stop));
2835 }
2836
2837 prepare_to_rcuwait(&vcpu->wait);
2838 for (;;) {
2839 set_current_state(TASK_INTERRUPTIBLE);
2840
2841 if (kvm_vcpu_check_block(vcpu) < 0)
2842 break;
2843
2844 waited = true;
2845 schedule();
2846 }
2847 finish_rcuwait(&vcpu->wait);
2848 cur = ktime_get();
2849 out:
2850 kvm_arch_vcpu_unblocking(vcpu);
2851 block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
2852
2853 update_halt_poll_stats(
2854 vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited);
2855
2856 if (!kvm_arch_no_poll(vcpu)) {
2857 if (!vcpu_valid_wakeup(vcpu)) {
2858 shrink_halt_poll_ns(vcpu);
2859 } else if (vcpu->kvm->max_halt_poll_ns) {
2860 if (block_ns <= vcpu->halt_poll_ns)
2861 ;
2862 /* we had a long block, shrink polling */
2863 else if (vcpu->halt_poll_ns &&
2864 block_ns > vcpu->kvm->max_halt_poll_ns)
2865 shrink_halt_poll_ns(vcpu);
2866 /* we had a short halt and our poll time is too small */
2867 else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
2868 block_ns < vcpu->kvm->max_halt_poll_ns)
2869 grow_halt_poll_ns(vcpu);
2870 } else {
2871 vcpu->halt_poll_ns = 0;
2872 }
2873 }
2874
2875 trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
2876 kvm_arch_vcpu_block_finish(vcpu);
2877 }
2878 EXPORT_SYMBOL_GPL(kvm_vcpu_block);
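/*
 * Summary of the polling adjustment at the end of kvm_vcpu_block() above,
 * where block_ns is the total time spent halted:
 *
 *   invalid wakeup                                -> shrink
 *   block_ns <= halt_poll_ns                      -> keep the current window
 *   block_ns >  max_halt_poll_ns (polling was on) -> shrink (polling wasted)
 *   halt_poll_ns < block_ns < max_halt_poll_ns    -> grow
 *   max_halt_poll_ns == 0                         -> polling disabled
 */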
2879
2880 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
2881 {
2882 struct rcuwait *waitp;
2883
2884 waitp = kvm_arch_vcpu_get_wait(vcpu);
2885 if (rcuwait_wake_up(waitp)) {
2886 WRITE_ONCE(vcpu->ready, true);
2887 ++vcpu->stat.halt_wakeup;
2888 return true;
2889 }
2890
2891 return false;
2892 }
2893 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
2894
2895 #ifndef CONFIG_S390
2896 /*
2897 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
2898 */
2899 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
2900 {
2901 int me;
2902 int cpu = vcpu->cpu;
2903
2904 if (kvm_vcpu_wake_up(vcpu))
2905 return;
2906
2907 me = get_cpu();
2908 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
2909 if (kvm_arch_vcpu_should_kick(vcpu))
2910 smp_send_reschedule(cpu);
2911 put_cpu();
2912 }
2913 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
2914 #endif /* !CONFIG_S390 */
2915
2916 int kvm_vcpu_yield_to(struct kvm_vcpu *target)
2917 {
2918 struct pid *pid;
2919 struct task_struct *task = NULL;
2920 int ret = 0;
2921
2922 rcu_read_lock();
2923 pid = rcu_dereference(target->pid);
2924 if (pid)
2925 task = get_pid_task(pid, PIDTYPE_PID);
2926 rcu_read_unlock();
2927 if (!task)
2928 return ret;
2929 ret = yield_to(task, 1);
2930 put_task_struct(task);
2931
2932 return ret;
2933 }
2934 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
2935
2936 /*
2937 * Helper that checks whether a VCPU is eligible for directed yield.
2938 * The most eligible candidate to yield to is decided by the following heuristics:
2939 *
2940 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently
2941 * (preempted lock holder), indicated by @in_spin_loop.
2942 * Set at the beginning and cleared at the end of interception/PLE handler.
2943 *
2944 * (b) VCPU which has done a pl-exit/cpu relax intercept but did not get a
2945 * chance last time (it has mostly become eligible now since we probably
2946 * yielded to the lockholder in the last iteration. This is done by toggling
2947 * @dy_eligible each time a VCPU is checked for eligibility.)
2948 *
2949 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
2950 * to preempted lock-holder could result in wrong VCPU selection and CPU
2951 * burning. Giving priority to a potential lock-holder increases lock
2952 * progress.
2953 *
2954 * Since the algorithm is based on heuristics, accessing another VCPU's data
2955 * without locking does no harm. It may result in trying to yield to the same
2956 * VCPU, failing, and continuing with the next VCPU, and so on.
2957 */
2958 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
2959 {
2960 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
2961 bool eligible;
2962
2963 eligible = !vcpu->spin_loop.in_spin_loop ||
2964 vcpu->spin_loop.dy_eligible;
2965
2966 if (vcpu->spin_loop.in_spin_loop)
2967 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
2968
2969 return eligible;
2970 #else
2971 return true;
2972 #endif
2973 }
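/*
 * Example of the toggle above: a vcpu that PLE-exits twice in a row is
 * skipped on its first eligibility check (in_spin_loop && !dy_eligible)
 * but accepted on the next one, so no spinning vcpu is passed over for
 * directed yield more than one round in a row.
 */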
2974
2975 /*
2976 * Unlike kvm_arch_vcpu_runnable, this function is called outside
2977 * a vcpu_load/vcpu_put pair. However, for most architectures
2978 * kvm_arch_vcpu_runnable does not require vcpu_load.
2979 */
2980 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
2981 {
2982 return kvm_arch_vcpu_runnable(vcpu);
2983 }
2984
2985 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
2986 {
2987 if (kvm_arch_dy_runnable(vcpu))
2988 return true;
2989
2990 #ifdef CONFIG_KVM_ASYNC_PF
2991 if (!list_empty_careful(&vcpu->async_pf.done))
2992 return true;
2993 #endif
2994
2995 return false;
2996 }
2997
2998 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
2999 {
3000 struct kvm *kvm = me->kvm;
3001 struct kvm_vcpu *vcpu;
3002 int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
3003 int yielded = 0;
3004 int try = 3;
3005 int pass;
3006 int i;
3007
3008 kvm_vcpu_set_in_spin_loop(me, true);
3009 /*
3010 * We boost the priority of a VCPU that is runnable but not
3011 * currently running, because it got preempted by something
3012 * else and called schedule in __vcpu_run. Hopefully that
3013 * VCPU is holding the lock that we need and will release it.
3014 * We approximate round-robin by starting at the last boosted VCPU.
3015 */
3016 for (pass = 0; pass < 2 && !yielded && try; pass++) {
3017 kvm_for_each_vcpu(i, vcpu, kvm) {
3018 if (!pass && i <= last_boosted_vcpu) {
3019 i = last_boosted_vcpu;
3020 continue;
3021 } else if (pass && i > last_boosted_vcpu)
3022 break;
3023 if (!READ_ONCE(vcpu->ready))
3024 continue;
3025 if (vcpu == me)
3026 continue;
3027 if (rcuwait_active(&vcpu->wait) &&
3028 !vcpu_dy_runnable(vcpu))
3029 continue;
3030 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
3031 !kvm_arch_vcpu_in_kernel(vcpu))
3032 continue;
3033 if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
3034 continue;
3035
3036 yielded = kvm_vcpu_yield_to(vcpu);
3037 if (yielded > 0) {
3038 kvm->last_boosted_vcpu = i;
3039 break;
3040 } else if (yielded < 0) {
3041 try--;
3042 if (!try)
3043 break;
3044 }
3045 }
3046 }
3047 kvm_vcpu_set_in_spin_loop(me, false);
3048
3049 /* Ensure vcpu is not eligible during next spinloop */
3050 kvm_vcpu_set_dy_eligible(me, false);
3051 }
3052 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
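/*
 * Round-robin example for the double pass above: with four vcpus and
 * last_boosted_vcpu == 2, candidates are tried in the order 3, 0, 1, 2
 * (pass 0 skips indices <= 2, pass 1 stops after index 2), so every vcpu
 * gets a turn before the starting point is considered again.
 */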
3053
3054 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
3055 {
3056 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
3057 struct page *page;
3058
3059 if (vmf->pgoff == 0)
3060 page = virt_to_page(vcpu->run);
3061 #ifdef CONFIG_X86
3062 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
3063 page = virt_to_page(vcpu->arch.pio_data);
3064 #endif
3065 #ifdef CONFIG_KVM_MMIO
3066 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
3067 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
3068 #endif
3069 else
3070 return kvm_arch_vcpu_fault(vcpu, vmf);
3071 get_page(page);
3072 vmf->page = page;
3073 return 0;
3074 }
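/*
 * Page layout implied by the fault handler above, from the point of view
 * of userspace mmap()ing the vcpu fd (an illustrative sketch; the mapping
 * size comes from KVM_GET_VCPU_MMAP_SIZE on /dev/kvm, and "vcpu_fd" and
 * "mmap_size" are caller-provided):
 *
 *	run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   vcpu_fd, 0);
 *
 * Offset 0 is struct kvm_run, KVM_PIO_PAGE_OFFSET the x86 PIO data page,
 * and KVM_COALESCED_MMIO_PAGE_OFFSET the coalesced MMIO ring.
 */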
3075
3076 static const struct vm_operations_struct kvm_vcpu_vm_ops = {
3077 .fault = kvm_vcpu_fault,
3078 };
3079
3080 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
3081 {
3082 vma->vm_ops = &kvm_vcpu_vm_ops;
3083 return 0;
3084 }
3085
3086 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
3087 {
3088 struct kvm_vcpu *vcpu = filp->private_data;
3089
3090 kvm_put_kvm(vcpu->kvm);
3091 return 0;
3092 }
3093
3094 static struct file_operations kvm_vcpu_fops = {
3095 .release = kvm_vcpu_release,
3096 .unlocked_ioctl = kvm_vcpu_ioctl,
3097 .mmap = kvm_vcpu_mmap,
3098 .llseek = noop_llseek,
3099 KVM_COMPAT(kvm_vcpu_compat_ioctl),
3100 };
3101
3102 /*
3103 * Allocates an inode for the vcpu.
3104 */
3105 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
3106 {
3107 char name[8 + 1 + ITOA_MAX_LEN + 1];
3108
3109 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
3110 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
3111 }
3112
3113 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
3114 {
3115 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
3116 struct dentry *debugfs_dentry;
3117 char dir_name[ITOA_MAX_LEN * 2];
3118
3119 if (!debugfs_initialized())
3120 return;
3121
3122 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
3123 debugfs_dentry = debugfs_create_dir(dir_name,
3124 vcpu->kvm->debugfs_dentry);
3125
3126 kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
3127 #endif
3128 }
3129
3130 /*
3131 * Creates some virtual cpus. Good luck creating more than one.
3132 */
3133 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
3134 {
3135 int r;
3136 struct kvm_vcpu *vcpu;
3137 struct page *page;
3138
3139 if (id >= KVM_MAX_VCPU_ID)
3140 return -EINVAL;
3141
3142 mutex_lock(&kvm->lock);
3143 if (kvm->created_vcpus == KVM_MAX_VCPUS) {
3144 mutex_unlock(&kvm->lock);
3145 return -EINVAL;
3146 }
3147
3148 kvm->created_vcpus++;
3149 mutex_unlock(&kvm->lock);
3150
3151 r = kvm_arch_vcpu_precreate(kvm, id);
3152 if (r)
3153 goto vcpu_decrement;
3154
3155 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
3156 if (!vcpu) {
3157 r = -ENOMEM;
3158 goto vcpu_decrement;
3159 }
3160
3161 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
3162 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3163 if (!page) {
3164 r = -ENOMEM;
3165 goto vcpu_free;
3166 }
3167 vcpu->run = page_address(page);
3168
3169 kvm_vcpu_init(vcpu, kvm, id);
3170
3171 r = kvm_arch_vcpu_create(vcpu);
3172 if (r)
3173 goto vcpu_free_run_page;
3174
3175 mutex_lock(&kvm->lock);
3176 if (kvm_get_vcpu_by_id(kvm, id)) {
3177 r = -EEXIST;
3178 goto unlock_vcpu_destroy;
3179 }
3180
3181 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
3182 BUG_ON(kvm->vcpus[vcpu->vcpu_idx]);
3183
3184 /* Now it's all set up, let userspace reach it */
3185 kvm_get_kvm(kvm);
3186 r = create_vcpu_fd(vcpu);
3187 if (r < 0) {
3188 kvm_put_kvm_no_destroy(kvm);
3189 goto unlock_vcpu_destroy;
3190 }
3191
3192 kvm->vcpus[vcpu->vcpu_idx] = vcpu;
3193
3194 /*
3195 * Pairs with smp_rmb() in kvm_get_vcpu. Write the kvm->vcpus entry
3196 * before the incremented value of kvm->online_vcpus.
3197 */
3198 smp_wmb();
3199 atomic_inc(&kvm->online_vcpus);
3200
3201 mutex_unlock(&kvm->lock);
3202 kvm_arch_vcpu_postcreate(vcpu);
3203 kvm_create_vcpu_debugfs(vcpu);
3204 return r;
3205
3206 unlock_vcpu_destroy:
3207 mutex_unlock(&kvm->lock);
3208 kvm_arch_vcpu_destroy(vcpu);
3209 vcpu_free_run_page:
3210 free_page((unsigned long)vcpu->run);
3211 vcpu_free:
3212 kmem_cache_free(kvm_vcpu_cache, vcpu);
3213 vcpu_decrement:
3214 mutex_lock(&kvm->lock);
3215 kvm->created_vcpus--;
3216 mutex_unlock(&kvm->lock);
3217 return r;
3218 }
3219
3220 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
3221 {
3222 if (sigset) {
3223 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
3224 vcpu->sigset_active = 1;
3225 vcpu->sigset = *sigset;
3226 } else
3227 vcpu->sigset_active = 0;
3228 return 0;
3229 }
3230
3231 static long kvm_vcpu_ioctl(struct file *filp,
3232 unsigned int ioctl, unsigned long arg)
3233 {
3234 struct kvm_vcpu *vcpu = filp->private_data;
3235 void __user *argp = (void __user *)arg;
3236 int r;
3237 struct kvm_fpu *fpu = NULL;
3238 struct kvm_sregs *kvm_sregs = NULL;
3239
3240 if (vcpu->kvm->mm != current->mm)
3241 return -EIO;
3242
3243 if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
3244 return -EINVAL;
3245
3246 /*
3247 * Some architectures have vcpu ioctls that are asynchronous to vcpu
3248 * execution; mutex_lock() would break them.
3249 */
3250 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
3251 if (r != -ENOIOCTLCMD)
3252 return r;
3253
3254 if (mutex_lock_killable(&vcpu->mutex))
3255 return -EINTR;
3256 switch (ioctl) {
3257 case KVM_RUN: {
3258 struct pid *oldpid;
3259 r = -EINVAL;
3260 if (arg)
3261 goto out;
3262 oldpid = rcu_access_pointer(vcpu->pid);
3263 if (unlikely(oldpid != task_pid(current))) {
3264 /* The thread running this VCPU changed. */
3265 struct pid *newpid;
3266
3267 r = kvm_arch_vcpu_run_pid_change(vcpu);
3268 if (r)
3269 break;
3270
3271 newpid = get_task_pid(current, PIDTYPE_PID);
3272 rcu_assign_pointer(vcpu->pid, newpid);
3273 if (oldpid)
3274 synchronize_rcu();
3275 put_pid(oldpid);
3276 }
3277 r = kvm_arch_vcpu_ioctl_run(vcpu);
3278 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
3279 break;
3280 }
3281 case KVM_GET_REGS: {
3282 struct kvm_regs *kvm_regs;
3283
3284 r = -ENOMEM;
3285 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
3286 if (!kvm_regs)
3287 goto out;
3288 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
3289 if (r)
3290 goto out_free1;
3291 r = -EFAULT;
3292 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
3293 goto out_free1;
3294 r = 0;
3295 out_free1:
3296 kfree(kvm_regs);
3297 break;
3298 }
3299 case KVM_SET_REGS: {
3300 struct kvm_regs *kvm_regs;
3301
3302 kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
3303 if (IS_ERR(kvm_regs)) {
3304 r = PTR_ERR(kvm_regs);
3305 goto out;
3306 }
3307 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
3308 kfree(kvm_regs);
3309 break;
3310 }
3311 case KVM_GET_SREGS: {
3312 kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
3313 GFP_KERNEL_ACCOUNT);
3314 r = -ENOMEM;
3315 if (!kvm_sregs)
3316 goto out;
3317 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
3318 if (r)
3319 goto out;
3320 r = -EFAULT;
3321 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
3322 goto out;
3323 r = 0;
3324 break;
3325 }
3326 case KVM_SET_SREGS: {
3327 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
3328 if (IS_ERR(kvm_sregs)) {
3329 r = PTR_ERR(kvm_sregs);
3330 kvm_sregs = NULL;
3331 goto out;
3332 }
3333 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
3334 break;
3335 }
3336 case KVM_GET_MP_STATE: {
3337 struct kvm_mp_state mp_state;
3338
3339 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
3340 if (r)
3341 goto out;
3342 r = -EFAULT;
3343 if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
3344 goto out;
3345 r = 0;
3346 break;
3347 }
3348 case KVM_SET_MP_STATE: {
3349 struct kvm_mp_state mp_state;
3350
3351 r = -EFAULT;
3352 if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
3353 goto out;
3354 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
3355 break;
3356 }
3357 case KVM_TRANSLATE: {
3358 struct kvm_translation tr;
3359
3360 r = -EFAULT;
3361 if (copy_from_user(&tr, argp, sizeof(tr)))
3362 goto out;
3363 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
3364 if (r)
3365 goto out;
3366 r = -EFAULT;
3367 if (copy_to_user(argp, &tr, sizeof(tr)))
3368 goto out;
3369 r = 0;
3370 break;
3371 }
3372 case KVM_SET_GUEST_DEBUG: {
3373 struct kvm_guest_debug dbg;
3374
3375 r = -EFAULT;
3376 if (copy_from_user(&dbg, argp, sizeof(dbg)))
3377 goto out;
3378 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
3379 break;
3380 }
3381 case KVM_SET_SIGNAL_MASK: {
3382 struct kvm_signal_mask __user *sigmask_arg = argp;
3383 struct kvm_signal_mask kvm_sigmask;
3384 sigset_t sigset, *p;
3385
3386 p = NULL;
3387 if (argp) {
3388 r = -EFAULT;
3389 if (copy_from_user(&kvm_sigmask, argp,
3390 sizeof(kvm_sigmask)))
3391 goto out;
3392 r = -EINVAL;
3393 if (kvm_sigmask.len != sizeof(sigset))
3394 goto out;
3395 r = -EFAULT;
3396 if (copy_from_user(&sigset, sigmask_arg->sigset,
3397 sizeof(sigset)))
3398 goto out;
3399 p = &sigset;
3400 }
3401 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
3402 break;
3403 }
3404 case KVM_GET_FPU: {
3405 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
3406 r = -ENOMEM;
3407 if (!fpu)
3408 goto out;
3409 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
3410 if (r)
3411 goto out;
3412 r = -EFAULT;
3413 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
3414 goto out;
3415 r = 0;
3416 break;
3417 }
3418 case KVM_SET_FPU: {
3419 fpu = memdup_user(argp, sizeof(*fpu));
3420 if (IS_ERR(fpu)) {
3421 r = PTR_ERR(fpu);
3422 fpu = NULL;
3423 goto out;
3424 }
3425 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
3426 break;
3427 }
3428 default:
3429 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
3430 }
3431 out:
3432 mutex_unlock(&vcpu->mutex);
3433 kfree(fpu);
3434 kfree(kvm_sregs);
3435 return r;
3436 }
3437
3438 #ifdef CONFIG_KVM_COMPAT
3439 static long kvm_vcpu_compat_ioctl(struct file *filp,
3440 unsigned int ioctl, unsigned long arg)
3441 {
3442 struct kvm_vcpu *vcpu = filp->private_data;
3443 void __user *argp = compat_ptr(arg);
3444 int r;
3445
3446 if (vcpu->kvm->mm != current->mm)
3447 return -EIO;
3448
3449 switch (ioctl) {
3450 case KVM_SET_SIGNAL_MASK: {
3451 struct kvm_signal_mask __user *sigmask_arg = argp;
3452 struct kvm_signal_mask kvm_sigmask;
3453 sigset_t sigset;
3454
3455 if (argp) {
3456 r = -EFAULT;
3457 if (copy_from_user(&kvm_sigmask, argp,
3458 sizeof(kvm_sigmask)))
3459 goto out;
3460 r = -EINVAL;
3461 if (kvm_sigmask.len != sizeof(compat_sigset_t))
3462 goto out;
3463 r = -EFAULT;
3464 if (get_compat_sigset(&sigset,
3465 (compat_sigset_t __user *)sigmask_arg->sigset))
3466 goto out;
3467 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
3468 } else
3469 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
3470 break;
3471 }
3472 default:
3473 r = kvm_vcpu_ioctl(filp, ioctl, arg);
3474 }
3475
3476 out:
3477 return r;
3478 }
3479 #endif
3480
3481 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
3482 {
3483 struct kvm_device *dev = filp->private_data;
3484
3485 if (dev->ops->mmap)
3486 return dev->ops->mmap(dev, vma);
3487
3488 return -ENODEV;
3489 }
3490
3491 static int kvm_device_ioctl_attr(struct kvm_device *dev,
3492 int (*accessor)(struct kvm_device *dev,
3493 struct kvm_device_attr *attr),
3494 unsigned long arg)
3495 {
3496 struct kvm_device_attr attr;
3497
3498 if (!accessor)
3499 return -EPERM;
3500
3501 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3502 return -EFAULT;
3503
3504 return accessor(dev, &attr);
3505 }
3506
3507 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
3508 unsigned long arg)
3509 {
3510 struct kvm_device *dev = filp->private_data;
3511
3512 if (dev->kvm->mm != current->mm)
3513 return -EIO;
3514
3515 switch (ioctl) {
3516 case KVM_SET_DEVICE_ATTR:
3517 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
3518 case KVM_GET_DEVICE_ATTR:
3519 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
3520 case KVM_HAS_DEVICE_ATTR:
3521 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
3522 default:
3523 if (dev->ops->ioctl)
3524 return dev->ops->ioctl(dev, ioctl, arg);
3525
3526 return -ENOTTY;
3527 }
3528 }
3529
3530 static int kvm_device_release(struct inode *inode, struct file *filp)
3531 {
3532 struct kvm_device *dev = filp->private_data;
3533 struct kvm *kvm = dev->kvm;
3534
3535 if (dev->ops->release) {
3536 mutex_lock(&kvm->lock);
3537 list_del(&dev->vm_node);
3538 dev->ops->release(dev);
3539 mutex_unlock(&kvm->lock);
3540 }
3541
3542 kvm_put_kvm(kvm);
3543 return 0;
3544 }
3545
3546 static const struct file_operations kvm_device_fops = {
3547 .unlocked_ioctl = kvm_device_ioctl,
3548 .release = kvm_device_release,
3549 KVM_COMPAT(kvm_device_ioctl),
3550 .mmap = kvm_device_mmap,
3551 };
3552
3553 struct kvm_device *kvm_device_from_filp(struct file *filp)
3554 {
3555 if (filp->f_op != &kvm_device_fops)
3556 return NULL;
3557
3558 return filp->private_data;
3559 }
3560
3561 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
3562 #ifdef CONFIG_KVM_MPIC
3563 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
3564 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
3565 #endif
3566 };
3567
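/*
 * Device ops are typically registered by architecture or device code so
 * that the type becomes available to KVM_CREATE_DEVICE; compiled-in types
 * such as the MPIC entries above are simply placed in the table statically.
 */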
3568 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
3569 {
3570 if (type >= ARRAY_SIZE(kvm_device_ops_table))
3571 return -ENOSPC;
3572
3573 if (kvm_device_ops_table[type] != NULL)
3574 return -EEXIST;
3575
3576 kvm_device_ops_table[type] = ops;
3577 return 0;
3578 }
3579
3580 void kvm_unregister_device_ops(u32 type)
3581 {
3582 if (kvm_device_ops_table[type] != NULL)
3583 kvm_device_ops_table[type] = NULL;
3584 }
3585
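/*
 * Back end of KVM_CREATE_DEVICE: look up the ops for the requested type
 * (bounds-checked with array_index_nospec), honour the
 * KVM_CREATE_DEVICE_TEST probe, then allocate the device, call ->create()
 * under kvm->lock, link it into kvm->devices and hand userspace an
 * anonymous inode fd that pins the VM until the device fd is released.
 */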
3586 static int kvm_ioctl_create_device(struct kvm *kvm,
3587 struct kvm_create_device *cd)
3588 {
3589 const struct kvm_device_ops *ops = NULL;
3590 struct kvm_device *dev;
3591 bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
3592 int type;
3593 int ret;
3594
3595 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
3596 return -ENODEV;
3597
3598 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
3599 ops = kvm_device_ops_table[type];
3600 if (ops == NULL)
3601 return -ENODEV;
3602
3603 if (test)
3604 return 0;
3605
3606 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
3607 if (!dev)
3608 return -ENOMEM;
3609
3610 dev->ops = ops;
3611 dev->kvm = kvm;
3612
3613 mutex_lock(&kvm->lock);
3614 ret = ops->create(dev, type);
3615 if (ret < 0) {
3616 mutex_unlock(&kvm->lock);
3617 kfree(dev);
3618 return ret;
3619 }
3620 list_add(&dev->vm_node, &kvm->devices);
3621 mutex_unlock(&kvm->lock);
3622
3623 if (ops->init)
3624 ops->init(dev);
3625
3626 kvm_get_kvm(kvm);
3627 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
3628 if (ret < 0) {
3629 kvm_put_kvm_no_destroy(kvm);
3630 mutex_lock(&kvm->lock);
3631 list_del(&dev->vm_node);
3632 mutex_unlock(&kvm->lock);
3633 ops->destroy(dev);
3634 return ret;
3635 }
3636
3637 cd->fd = ret;
3638 return 0;
3639 }
3640
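/*
 * KVM_CHECK_EXTENSION handling for capabilities implemented in generic
 * code; anything not recognised here is forwarded to the architecture via
 * kvm_vm_ioctl_check_extension().
 */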
3641 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
3642 {
3643 switch (arg) {
3644 case KVM_CAP_USER_MEMORY:
3645 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
3646 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
3647 case KVM_CAP_INTERNAL_ERROR_DATA:
3648 #ifdef CONFIG_HAVE_KVM_MSI
3649 case KVM_CAP_SIGNAL_MSI:
3650 #endif
3651 #ifdef CONFIG_HAVE_KVM_IRQFD
3652 case KVM_CAP_IRQFD:
3653 case KVM_CAP_IRQFD_RESAMPLE:
3654 #endif
3655 case KVM_CAP_IOEVENTFD_ANY_LENGTH:
3656 case KVM_CAP_CHECK_EXTENSION_VM:
3657 case KVM_CAP_ENABLE_CAP_VM:
3658 case KVM_CAP_HALT_POLL:
3659 return 1;
3660 #ifdef CONFIG_KVM_MMIO
3661 case KVM_CAP_COALESCED_MMIO:
3662 return KVM_COALESCED_MMIO_PAGE_OFFSET;
3663 case KVM_CAP_COALESCED_PIO:
3664 return 1;
3665 #endif
3666 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
3667 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
3668 return KVM_DIRTY_LOG_MANUAL_CAPS;
3669 #endif
3670 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
3671 case KVM_CAP_IRQ_ROUTING:
3672 return KVM_MAX_IRQ_ROUTES;
3673 #endif
3674 #if KVM_ADDRESS_SPACE_NUM > 1
3675 case KVM_CAP_MULTI_ADDRESS_SPACE:
3676 return KVM_ADDRESS_SPACE_NUM;
3677 #endif
3678 case KVM_CAP_NR_MEMSLOTS:
3679 return KVM_USER_MEM_SLOTS;
3680 default:
3681 break;
3682 }
3683 return kvm_vm_ioctl_check_extension(kvm, arg);
3684 }
3685
3686 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
3687 struct kvm_enable_cap *cap)
3688 {
3689 return -EINVAL;
3690 }
3691
3692 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
3693 struct kvm_enable_cap *cap)
3694 {
3695 switch (cap->cap) {
3696 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
3697 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
3698 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
3699
3700 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
3701 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
3702
3703 if (cap->flags || (cap->args[0] & ~allowed_options))
3704 return -EINVAL;
3705 kvm->manual_dirty_log_protect = cap->args[0];
3706 return 0;
3707 }
3708 #endif
3709 case KVM_CAP_HALT_POLL: {
3710 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
3711 return -EINVAL;
3712
3713 kvm->max_halt_poll_ns = cap->args[0];
3714 return 0;
3715 }
3716 default:
3717 return kvm_vm_ioctl_enable_cap(kvm, cap);
3718 }
3719 }
3720
3721 static long kvm_vm_ioctl(struct file *filp,
3722 unsigned int ioctl, unsigned long arg)
3723 {
3724 struct kvm *kvm = filp->private_data;
3725 void __user *argp = (void __user *)arg;
3726 int r;
3727
3728 if (kvm->mm != current->mm)
3729 return -EIO;
3730 switch (ioctl) {
3731 case KVM_CREATE_VCPU:
3732 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
3733 break;
3734 case KVM_ENABLE_CAP: {
3735 struct kvm_enable_cap cap;
3736
3737 r = -EFAULT;
3738 if (copy_from_user(&cap, argp, sizeof(cap)))
3739 goto out;
3740 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
3741 break;
3742 }
3743 case KVM_SET_USER_MEMORY_REGION: {
3744 struct kvm_userspace_memory_region kvm_userspace_mem;
3745
3746 r = -EFAULT;
3747 if (copy_from_user(&kvm_userspace_mem, argp,
3748 sizeof(kvm_userspace_mem)))
3749 goto out;
3750
3751 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
3752 break;
3753 }
3754 case KVM_GET_DIRTY_LOG: {
3755 struct kvm_dirty_log log;
3756
3757 r = -EFAULT;
3758 if (copy_from_user(&log, argp, sizeof(log)))
3759 goto out;
3760 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
3761 break;
3762 }
3763 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
3764 case KVM_CLEAR_DIRTY_LOG: {
3765 struct kvm_clear_dirty_log log;
3766
3767 r = -EFAULT;
3768 if (copy_from_user(&log, argp, sizeof(log)))
3769 goto out;
3770 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
3771 break;
3772 }
3773 #endif
3774 #ifdef CONFIG_KVM_MMIO
3775 case KVM_REGISTER_COALESCED_MMIO: {
3776 struct kvm_coalesced_mmio_zone zone;
3777
3778 r = -EFAULT;
3779 if (copy_from_user(&zone, argp, sizeof(zone)))
3780 goto out;
3781 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
3782 break;
3783 }
3784 case KVM_UNREGISTER_COALESCED_MMIO: {
3785 struct kvm_coalesced_mmio_zone zone;
3786
3787 r = -EFAULT;
3788 if (copy_from_user(&zone, argp, sizeof(zone)))
3789 goto out;
3790 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
3791 break;
3792 }
3793 #endif
3794 case KVM_IRQFD: {
3795 struct kvm_irqfd data;
3796
3797 r = -EFAULT;
3798 if (copy_from_user(&data, argp, sizeof(data)))
3799 goto out;
3800 r = kvm_irqfd(kvm, &data);
3801 break;
3802 }
3803 case KVM_IOEVENTFD: {
3804 struct kvm_ioeventfd data;
3805
3806 r = -EFAULT;
3807 if (copy_from_user(&data, argp, sizeof(data)))
3808 goto out;
3809 r = kvm_ioeventfd(kvm, &data);
3810 break;
3811 }
3812 #ifdef CONFIG_HAVE_KVM_MSI
3813 case KVM_SIGNAL_MSI: {
3814 struct kvm_msi msi;
3815
3816 r = -EFAULT;
3817 if (copy_from_user(&msi, argp, sizeof(msi)))
3818 goto out;
3819 r = kvm_send_userspace_msi(kvm, &msi);
3820 break;
3821 }
3822 #endif
3823 #ifdef __KVM_HAVE_IRQ_LINE
3824 case KVM_IRQ_LINE_STATUS:
3825 case KVM_IRQ_LINE: {
3826 struct kvm_irq_level irq_event;
3827
3828 r = -EFAULT;
3829 if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
3830 goto out;
3831
3832 r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
3833 ioctl == KVM_IRQ_LINE_STATUS);
3834 if (r)
3835 goto out;
3836
3837 r = -EFAULT;
3838 if (ioctl == KVM_IRQ_LINE_STATUS) {
3839 if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
3840 goto out;
3841 }
3842
3843 r = 0;
3844 break;
3845 }
3846 #endif
3847 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
3848 case KVM_SET_GSI_ROUTING: {
3849 struct kvm_irq_routing routing;
3850 struct kvm_irq_routing __user *urouting;
3851 struct kvm_irq_routing_entry *entries = NULL;
3852
3853 r = -EFAULT;
3854 if (copy_from_user(&routing, argp, sizeof(routing)))
3855 goto out;
3856 r = -EINVAL;
3857 if (!kvm_arch_can_set_irq_routing(kvm))
3858 goto out;
3859 if (routing.nr > KVM_MAX_IRQ_ROUTES)
3860 goto out;
3861 if (routing.flags)
3862 goto out;
3863 if (routing.nr) {
3864 urouting = argp;
3865 entries = vmemdup_user(urouting->entries,
3866 array_size(sizeof(*entries),
3867 routing.nr));
3868 if (IS_ERR(entries)) {
3869 r = PTR_ERR(entries);
3870 goto out;
3871 }
3872 }
3873 r = kvm_set_irq_routing(kvm, entries, routing.nr,
3874 routing.flags);
3875 kvfree(entries);
3876 break;
3877 }
3878 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
3879 case KVM_CREATE_DEVICE: {
3880 struct kvm_create_device cd;
3881
3882 r = -EFAULT;
3883 if (copy_from_user(&cd, argp, sizeof(cd)))
3884 goto out;
3885
3886 r = kvm_ioctl_create_device(kvm, &cd);
3887 if (r)
3888 goto out;
3889
3890 r = -EFAULT;
3891 if (copy_to_user(argp, &cd, sizeof(cd)))
3892 goto out;
3893
3894 r = 0;
3895 break;
3896 }
3897 case KVM_CHECK_EXTENSION:
3898 r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
3899 break;
3900 default:
3901 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
3902 }
3903 out:
3904 return r;
3905 }
3906
3907 #ifdef CONFIG_KVM_COMPAT
3908 struct compat_kvm_dirty_log {
3909 __u32 slot;
3910 __u32 padding1;
3911 union {
3912 compat_uptr_t dirty_bitmap; /* one bit per page */
3913 __u64 padding2;
3914 };
3915 };
3916
3917 struct compat_kvm_clear_dirty_log {
3918 __u32 slot;
3919 __u32 num_pages;
3920 __u64 first_page;
3921 union {
3922 compat_uptr_t dirty_bitmap; /* one bit per page */
3923 __u64 padding2;
3924 };
3925 };
3926
3927 static long kvm_vm_compat_ioctl(struct file *filp,
3928 unsigned int ioctl, unsigned long arg)
3929 {
3930 struct kvm *kvm = filp->private_data;
3931 int r;
3932
3933 if (kvm->mm != current->mm)
3934 return -EIO;
3935 switch (ioctl) {
3936 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
3937 case KVM_CLEAR_DIRTY_LOG: {
3938 struct compat_kvm_clear_dirty_log compat_log;
3939 struct kvm_clear_dirty_log log;
3940
3941 if (copy_from_user(&compat_log, (void __user *)arg,
3942 sizeof(compat_log)))
3943 return -EFAULT;
3944 log.slot = compat_log.slot;
3945 log.num_pages = compat_log.num_pages;
3946 log.first_page = compat_log.first_page;
3947 log.padding2 = compat_log.padding2;
3948 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
3949
3950 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
3951 break;
3952 }
3953 #endif
3954 case KVM_GET_DIRTY_LOG: {
3955 struct compat_kvm_dirty_log compat_log;
3956 struct kvm_dirty_log log;
3957
3958 if (copy_from_user(&compat_log, (void __user *)arg,
3959 sizeof(compat_log)))
3960 return -EFAULT;
3961 log.slot = compat_log.slot;
3962 log.padding1 = compat_log.padding1;
3963 log.padding2 = compat_log.padding2;
3964 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
3965
3966 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
3967 break;
3968 }
3969 default:
3970 r = kvm_vm_ioctl(filp, ioctl, arg);
3971 }
3972 return r;
3973 }
3974 #endif
3975
3976 static struct file_operations kvm_vm_fops = {
3977 .release = kvm_vm_release,
3978 .unlocked_ioctl = kvm_vm_ioctl,
3979 .llseek = noop_llseek,
3980 KVM_COMPAT(kvm_vm_compat_ioctl),
3981 };
3982
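/*
 * KVM_CREATE_VM: create the VM, set up coalesced MMIO when configured,
 * wrap the VM in an anonymous inode file and return its fd. Once the file
 * exists, the final fput() is what releases the VM reference.
 */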
3983 static int kvm_dev_ioctl_create_vm(unsigned long type)
3984 {
3985 int r;
3986 struct kvm *kvm;
3987 struct file *file;
3988
3989 kvm = kvm_create_vm(type);
3990 if (IS_ERR(kvm))
3991 return PTR_ERR(kvm);
3992 #ifdef CONFIG_KVM_MMIO
3993 r = kvm_coalesced_mmio_init(kvm);
3994 if (r < 0)
3995 goto put_kvm;
3996 #endif
3997 r = get_unused_fd_flags(O_CLOEXEC);
3998 if (r < 0)
3999 goto put_kvm;
4000
4001 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
4002 if (IS_ERR(file)) {
4003 put_unused_fd(r);
4004 r = PTR_ERR(file);
4005 goto put_kvm;
4006 }
4007
4008 /*
4009 * Don't call kvm_put_kvm anymore at this point; file->f_op is
4010 * already set, with ->release() being kvm_vm_release(). In error
4011 * cases it will be called by the final fput(file) and will take
4012 * care of doing kvm_put_kvm(kvm).
4013 */
4014 if (kvm_create_vm_debugfs(kvm, r) < 0) {
4015 put_unused_fd(r);
4016 fput(file);
4017 return -ENOMEM;
4018 }
4019 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
4020
4021 fd_install(r, file);
4022 return r;
4023
4024 put_kvm:
4025 kvm_put_kvm(kvm);
4026 return r;
4027 }
4028
4029 static long kvm_dev_ioctl(struct file *filp,
4030 unsigned int ioctl, unsigned long arg)
4031 {
4032 long r = -EINVAL;
4033
4034 switch (ioctl) {
4035 case KVM_GET_API_VERSION:
4036 if (arg)
4037 goto out;
4038 r = KVM_API_VERSION;
4039 break;
4040 case KVM_CREATE_VM:
4041 r = kvm_dev_ioctl_create_vm(arg);
4042 break;
4043 case KVM_CHECK_EXTENSION:
4044 r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
4045 break;
4046 case KVM_GET_VCPU_MMAP_SIZE:
4047 if (arg)
4048 goto out;
4049 r = PAGE_SIZE; /* struct kvm_run */
4050 #ifdef CONFIG_X86
4051 r += PAGE_SIZE; /* pio data page */
4052 #endif
4053 #ifdef CONFIG_KVM_MMIO
4054 r += PAGE_SIZE; /* coalesced mmio ring page */
4055 #endif
4056 break;
4057 case KVM_TRACE_ENABLE:
4058 case KVM_TRACE_PAUSE:
4059 case KVM_TRACE_DISABLE:
4060 r = -EOPNOTSUPP;
4061 break;
4062 default:
4063 return kvm_arch_dev_ioctl(filp, ioctl, arg);
4064 }
4065 out:
4066 return r;
4067 }
4068
4069 static struct file_operations kvm_chardev_ops = {
4070 .unlocked_ioctl = kvm_dev_ioctl,
4071 .llseek = noop_llseek,
4072 KVM_COMPAT(kvm_dev_ioctl),
4073 };
4074
4075 static struct miscdevice kvm_dev = {
4076 KVM_MINOR,
4077 "kvm",
4078 &kvm_chardev_ops,
4079 };
4080
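/*
 * Enable virtualization on the current CPU and track it in
 * cpus_hardware_enabled; on failure the CPU is removed from the mask again
 * and hardware_enable_failed is bumped so hardware_enable_all() can back
 * out.
 */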
4081 static void hardware_enable_nolock(void *junk)
4082 {
4083 int cpu = raw_smp_processor_id();
4084 int r;
4085
4086 if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
4087 return;
4088
4089 cpumask_set_cpu(cpu, cpus_hardware_enabled);
4090
4091 r = kvm_arch_hardware_enable();
4092
4093 if (r) {
4094 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
4095 atomic_inc(&hardware_enable_failed);
4096 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
4097 }
4098 }
4099
4100 static int kvm_starting_cpu(unsigned int cpu)
4101 {
4102 raw_spin_lock(&kvm_count_lock);
4103 if (kvm_usage_count)
4104 hardware_enable_nolock(NULL);
4105 raw_spin_unlock(&kvm_count_lock);
4106 return 0;
4107 }
4108
4109 static void hardware_disable_nolock(void *junk)
4110 {
4111 int cpu = raw_smp_processor_id();
4112
4113 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
4114 return;
4115 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
4116 kvm_arch_hardware_disable();
4117 }
4118
4119 static int kvm_dying_cpu(unsigned int cpu)
4120 {
4121 raw_spin_lock(&kvm_count_lock);
4122 if (kvm_usage_count)
4123 hardware_disable_nolock(NULL);
4124 raw_spin_unlock(&kvm_count_lock);
4125 return 0;
4126 }
4127
4128 static void hardware_disable_all_nolock(void)
4129 {
4130 BUG_ON(!kvm_usage_count);
4131
4132 kvm_usage_count--;
4133 if (!kvm_usage_count)
4134 on_each_cpu(hardware_disable_nolock, NULL, 1);
4135 }
4136
4137 static void hardware_disable_all(void)
4138 {
4139 raw_spin_lock(&kvm_count_lock);
4140 hardware_disable_all_nolock();
4141 raw_spin_unlock(&kvm_count_lock);
4142 }
4143
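/*
 * kvm_usage_count tracks how many users currently need the virtualization
 * extensions enabled. The first call enables them on every online CPU,
 * rolling back with -EBUSY if any CPU fails; the matching
 * hardware_disable_all() disables them again when the count drops to zero.
 */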
4144 static int hardware_enable_all(void)
4145 {
4146 int r = 0;
4147
4148 raw_spin_lock(&kvm_count_lock);
4149
4150 kvm_usage_count++;
4151 if (kvm_usage_count == 1) {
4152 atomic_set(&hardware_enable_failed, 0);
4153 on_each_cpu(hardware_enable_nolock, NULL, 1);
4154
4155 if (atomic_read(&hardware_enable_failed)) {
4156 hardware_disable_all_nolock();
4157 r = -EBUSY;
4158 }
4159 }
4160
4161 raw_spin_unlock(&kvm_count_lock);
4162
4163 return r;
4164 }
4165
4166 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
4167 void *v)
4168 {
4169 /*
4170 * Some (well, at least mine) BIOSes hang on reboot if
4171 * in vmx root mode.
4172 *
4173 * And Intel TXT requires VMX to be off on all CPUs when the system shuts down.
4174 */
4175 pr_info("kvm: exiting hardware virtualization\n");
4176 kvm_rebooting = true;
4177 on_each_cpu(hardware_disable_nolock, NULL, 1);
4178 return NOTIFY_OK;
4179 }
4180
4181 static struct notifier_block kvm_reboot_notifier = {
4182 .notifier_call = kvm_reboot,
4183 .priority = 0,
4184 };
4185
4186 static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
4187 {
4188 int i;
4189
4190 for (i = 0; i < bus->dev_count; i++) {
4191 struct kvm_io_device *pos = bus->range[i].dev;
4192
4193 kvm_iodevice_destructor(pos);
4194 }
4195 kfree(bus);
4196 }
4197
4198 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
4199 const struct kvm_io_range *r2)
4200 {
4201 gpa_t addr1 = r1->addr;
4202 gpa_t addr2 = r2->addr;
4203
4204 if (addr1 < addr2)
4205 return -1;
4206
4207 /* If r2->len == 0, match the exact address. If r2->len != 0,
4208 * accept any overlapping write. Any order is acceptable for
4209 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
4210 * we process all of them.
4211 */
4212 if (r2->len) {
4213 addr1 += r1->len;
4214 addr2 += r2->len;
4215 }
4216
4217 if (addr1 > addr2)
4218 return 1;
4219
4220 return 0;
4221 }
4222
4223 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
4224 {
4225 return kvm_io_bus_cmp(p1, p2);
4226 }
4227
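/*
 * Find the index of the first range on the bus that compares equal to
 * [addr, addr + len): bsearch() may land on any matching entry, so walk
 * backwards to the first one and let callers iterate forward over all
 * overlapping devices.
 */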
4228 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
4229 gpa_t addr, int len)
4230 {
4231 struct kvm_io_range *range, key;
4232 int off;
4233
4234 key = (struct kvm_io_range) {
4235 .addr = addr,
4236 .len = len,
4237 };
4238
4239 range = bsearch(&key, bus->range, bus->dev_count,
4240 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
4241 if (range == NULL)
4242 return -ENOENT;
4243
4244 off = range - bus->range;
4245
4246 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
4247 off--;
4248
4249 return off;
4250 }
4251
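/*
 * Offer the write to every device whose range matches, in bus order, until
 * one of them accepts it (kvm_iodevice_write() returning 0); the index of
 * that device is returned so callers such as kvm_io_bus_write_cookie() can
 * cache it.
 */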
4252 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
4253 struct kvm_io_range *range, const void *val)
4254 {
4255 int idx;
4256
4257 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
4258 if (idx < 0)
4259 return -EOPNOTSUPP;
4260
4261 while (idx < bus->dev_count &&
4262 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
4263 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
4264 range->len, val))
4265 return idx;
4266 idx++;
4267 }
4268
4269 return -EOPNOTSUPP;
4270 }
4271
4272 /* kvm_io_bus_write - called under kvm->slots_lock */
4273 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
4274 int len, const void *val)
4275 {
4276 struct kvm_io_bus *bus;
4277 struct kvm_io_range range;
4278 int r;
4279
4280 range = (struct kvm_io_range) {
4281 .addr = addr,
4282 .len = len,
4283 };
4284
4285 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
4286 if (!bus)
4287 return -ENOMEM;
4288 r = __kvm_io_bus_write(vcpu, bus, &range, val);
4289 return r < 0 ? r : 0;
4290 }
4291 EXPORT_SYMBOL_GPL(kvm_io_bus_write);
4292
4293 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */
4294 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
4295 gpa_t addr, int len, const void *val, long cookie)
4296 {
4297 struct kvm_io_bus *bus;
4298 struct kvm_io_range range;
4299
4300 range = (struct kvm_io_range) {
4301 .addr = addr,
4302 .len = len,
4303 };
4304
4305 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
4306 if (!bus)
4307 return -ENOMEM;
4308
4309 /* First try the device referenced by cookie. */
4310 if ((cookie >= 0) && (cookie < bus->dev_count) &&
4311 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
4312 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
4313 val))
4314 return cookie;
4315
4316 /*
4317 * cookie contained garbage; fall back to search and return the
4318 * correct cookie value.
4319 */
4320 return __kvm_io_bus_write(vcpu, bus, &range, val);
4321 }
4322
4323 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
4324 struct kvm_io_range *range, void *val)
4325 {
4326 int idx;
4327
4328 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
4329 if (idx < 0)
4330 return -EOPNOTSUPP;
4331
4332 while (idx < bus->dev_count &&
4333 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
4334 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
4335 range->len, val))
4336 return idx;
4337 idx++;
4338 }
4339
4340 return -EOPNOTSUPP;
4341 }
4342
4343 /* kvm_io_bus_read - called under kvm->slots_lock */
4344 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
4345 int len, void *val)
4346 {
4347 struct kvm_io_bus *bus;
4348 struct kvm_io_range range;
4349 int r;
4350
4351 range = (struct kvm_io_range) {
4352 .addr = addr,
4353 .len = len,
4354 };
4355
4356 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
4357 if (!bus)
4358 return -ENOMEM;
4359 r = __kvm_io_bus_read(vcpu, bus, &range, val);
4360 return r < 0 ? r : 0;
4361 }
4362
4363 /* Caller must hold slots_lock. */
4364 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
4365 int len, struct kvm_io_device *dev)
4366 {
4367 int i;
4368 struct kvm_io_bus *new_bus, *bus;
4369 struct kvm_io_range range;
4370
4371 bus = kvm_get_bus(kvm, bus_idx);
4372 if (!bus)
4373 return -ENOMEM;
4374
4375 /* exclude ioeventfd which is limited by maximum fd */
4376 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
4377 return -ENOSPC;
4378
4379 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
4380 GFP_KERNEL_ACCOUNT);
4381 if (!new_bus)
4382 return -ENOMEM;
4383
4384 range = (struct kvm_io_range) {
4385 .addr = addr,
4386 .len = len,
4387 .dev = dev,
4388 };
4389
4390 for (i = 0; i < bus->dev_count; i++)
4391 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
4392 break;
4393
4394 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
4395 new_bus->dev_count++;
4396 new_bus->range[i] = range;
4397 memcpy(new_bus->range + i + 1, bus->range + i,
4398 (bus->dev_count - i) * sizeof(struct kvm_io_range));
4399 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
4400 synchronize_srcu_expedited(&kvm->srcu);
4401 kfree(bus);
4402
4403 return 0;
4404 }
4405
4406 /* Caller must hold slots_lock. */
4407 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
4408 struct kvm_io_device *dev)
4409 {
4410 int i, j;
4411 struct kvm_io_bus *new_bus, *bus;
4412
4413 bus = kvm_get_bus(kvm, bus_idx);
4414 if (!bus)
4415 return 0;
4416
4417 for (i = 0; i < bus->dev_count; i++)
4418 if (bus->range[i].dev == dev) {
4419 break;
4420 }
4421
4422 if (i == bus->dev_count)
4423 return 0;
4424
4425 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
4426 GFP_KERNEL_ACCOUNT);
4427 if (new_bus) {
4428 memcpy(new_bus, bus, struct_size(bus, range, i));
4429 new_bus->dev_count--;
4430 memcpy(new_bus->range + i, bus->range + i + 1,
4431 flex_array_size(new_bus, range, new_bus->dev_count - i));
4432 }
4433
4434 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
4435 synchronize_srcu_expedited(&kvm->srcu);
4436
4437 /* Destroy the old bus _after_ installing the (null) bus. */
4438 if (!new_bus) {
4439 pr_err("kvm: failed to shrink bus, removing it completely\n");
4440 for (j = 0; j < bus->dev_count; j++) {
4441 if (j == i)
4442 continue;
4443 kvm_iodevice_destructor(bus->range[j].dev);
4444 }
4445 }
4446
4447 kfree(bus);
4448 return new_bus ? 0 : -ENOMEM;
4449 }
4450
4451 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
4452 gpa_t addr)
4453 {
4454 struct kvm_io_bus *bus;
4455 int dev_idx, srcu_idx;
4456 struct kvm_io_device *iodev = NULL;
4457
4458 srcu_idx = srcu_read_lock(&kvm->srcu);
4459
4460 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
4461 if (!bus)
4462 goto out_unlock;
4463
4464 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
4465 if (dev_idx < 0)
4466 goto out_unlock;
4467
4468 iodev = bus->range[dev_idx].dev;
4469
4470 out_unlock:
4471 srcu_read_unlock(&kvm->srcu, srcu_idx);
4472
4473 return iodev;
4474 }
4475 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
4476
4477 static int kvm_debugfs_open(struct inode *inode, struct file *file,
4478 int (*get)(void *, u64 *), int (*set)(void *, u64),
4479 const char *fmt)
4480 {
4481 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
4482 inode->i_private;
4483
4484 /* The debugfs files are a reference to the kvm struct which
4485 * is still valid when kvm_destroy_vm is called.
4486 * To avoid the race between open and the removal of the debugfs
4487 * directory we test against the users count.
4488 */
4489 if (!refcount_inc_not_zero(&stat_data->kvm->users_count))
4490 return -ENOENT;
4491
4492 if (simple_attr_open(inode, file, get,
4493 KVM_DBGFS_GET_MODE(stat_data->dbgfs_item) & 0222
4494 ? set : NULL,
4495 fmt)) {
4496 kvm_put_kvm(stat_data->kvm);
4497 return -ENOMEM;
4498 }
4499
4500 return 0;
4501 }
4502
4503 static int kvm_debugfs_release(struct inode *inode, struct file *file)
4504 {
4505 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
4506 inode->i_private;
4507
4508 simple_attr_release(inode, file);
4509 kvm_put_kvm(stat_data->kvm);
4510
4511 return 0;
4512 }
4513
4514 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
4515 {
4516 *val = *(ulong *)((void *)kvm + offset);
4517
4518 return 0;
4519 }
4520
4521 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
4522 {
4523 *(ulong *)((void *)kvm + offset) = 0;
4524
4525 return 0;
4526 }
4527
4528 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
4529 {
4530 int i;
4531 struct kvm_vcpu *vcpu;
4532
4533 *val = 0;
4534
4535 kvm_for_each_vcpu(i, vcpu, kvm)
4536 *val += *(u64 *)((void *)vcpu + offset);
4537
4538 return 0;
4539 }
4540
4541 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
4542 {
4543 int i;
4544 struct kvm_vcpu *vcpu;
4545
4546 kvm_for_each_vcpu(i, vcpu, kvm)
4547 *(u64 *)((void *)vcpu + offset) = 0;
4548
4549 return 0;
4550 }
4551
4552 static int kvm_stat_data_get(void *data, u64 *val)
4553 {
4554 int r = -EFAULT;
4555 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
4556
4557 switch (stat_data->dbgfs_item->kind) {
4558 case KVM_STAT_VM:
4559 r = kvm_get_stat_per_vm(stat_data->kvm,
4560 stat_data->dbgfs_item->offset, val);
4561 break;
4562 case KVM_STAT_VCPU:
4563 r = kvm_get_stat_per_vcpu(stat_data->kvm,
4564 stat_data->dbgfs_item->offset, val);
4565 break;
4566 }
4567
4568 return r;
4569 }
4570
4571 static int kvm_stat_data_clear(void *data, u64 val)
4572 {
4573 int r = -EFAULT;
4574 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
4575
4576 if (val)
4577 return -EINVAL;
4578
4579 switch (stat_data->dbgfs_item->kind) {
4580 case KVM_STAT_VM:
4581 r = kvm_clear_stat_per_vm(stat_data->kvm,
4582 stat_data->dbgfs_item->offset);
4583 break;
4584 case KVM_STAT_VCPU:
4585 r = kvm_clear_stat_per_vcpu(stat_data->kvm,
4586 stat_data->dbgfs_item->offset);
4587 break;
4588 }
4589
4590 return r;
4591 }
4592
4593 static int kvm_stat_data_open(struct inode *inode, struct file *file)
4594 {
4595 __simple_attr_check_format("%llu\n", 0ull);
4596 return kvm_debugfs_open(inode, file, kvm_stat_data_get,
4597 kvm_stat_data_clear, "%llu\n");
4598 }
4599
4600 static const struct file_operations stat_fops_per_vm = {
4601 .owner = THIS_MODULE,
4602 .open = kvm_stat_data_open,
4603 .release = kvm_debugfs_release,
4604 .read = simple_attr_read,
4605 .write = simple_attr_write,
4606 .llseek = no_llseek,
4607 };
4608
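/*
 * The vm_stat_fops/vcpu_stat_fops attributes below aggregate a single
 * statistic across every VM on vm_list; the debugfs file's private data is
 * the offset of the counter inside struct kvm (or struct kvm_vcpu).
 */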
4609 static int vm_stat_get(void *_offset, u64 *val)
4610 {
4611 unsigned offset = (long)_offset;
4612 struct kvm *kvm;
4613 u64 tmp_val;
4614
4615 *val = 0;
4616 mutex_lock(&kvm_lock);
4617 list_for_each_entry(kvm, &vm_list, vm_list) {
4618 kvm_get_stat_per_vm(kvm, offset, &tmp_val);
4619 *val += tmp_val;
4620 }
4621 mutex_unlock(&kvm_lock);
4622 return 0;
4623 }
4624
4625 static int vm_stat_clear(void *_offset, u64 val)
4626 {
4627 unsigned offset = (long)_offset;
4628 struct kvm *kvm;
4629
4630 if (val)
4631 return -EINVAL;
4632
4633 mutex_lock(&kvm_lock);
4634 list_for_each_entry(kvm, &vm_list, vm_list) {
4635 kvm_clear_stat_per_vm(kvm, offset);
4636 }
4637 mutex_unlock(&kvm_lock);
4638
4639 return 0;
4640 }
4641
4642 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
4643
4644 static int vcpu_stat_get(void *_offset, u64 *val)
4645 {
4646 unsigned offset = (long)_offset;
4647 struct kvm *kvm;
4648 u64 tmp_val;
4649
4650 *val = 0;
4651 mutex_lock(&kvm_lock);
4652 list_for_each_entry(kvm, &vm_list, vm_list) {
4653 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
4654 *val += tmp_val;
4655 }
4656 mutex_unlock(&kvm_lock);
4657 return 0;
4658 }
4659
4660 static int vcpu_stat_clear(void *_offset, u64 val)
4661 {
4662 unsigned offset = (long)_offset;
4663 struct kvm *kvm;
4664
4665 if (val)
4666 return -EINVAL;
4667
4668 mutex_lock(&kvm_lock);
4669 list_for_each_entry(kvm, &vm_list, vm_list) {
4670 kvm_clear_stat_per_vcpu(kvm, offset);
4671 }
4672 mutex_unlock(&kvm_lock);
4673
4674 return 0;
4675 }
4676
4677 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
4678 "%llu\n");
4679
4680 static const struct file_operations *stat_fops[] = {
4681 [KVM_STAT_VCPU] = &vcpu_stat_fops,
4682 [KVM_STAT_VM] = &vm_stat_fops,
4683 };
4684
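/*
 * Emit a KOBJ_CHANGE uevent on the /dev/kvm device for VM creation and
 * destruction, carrying the number of VMs ever created, the number
 * currently active, the owning PID and (if present) the VM's debugfs stats
 * path.
 */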
4685 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
4686 {
4687 struct kobj_uevent_env *env;
4688 unsigned long long created, active;
4689
4690 if (!kvm_dev.this_device || !kvm)
4691 return;
4692
4693 mutex_lock(&kvm_lock);
4694 if (type == KVM_EVENT_CREATE_VM) {
4695 kvm_createvm_count++;
4696 kvm_active_vms++;
4697 } else if (type == KVM_EVENT_DESTROY_VM) {
4698 kvm_active_vms--;
4699 }
4700 created = kvm_createvm_count;
4701 active = kvm_active_vms;
4702 mutex_unlock(&kvm_lock);
4703
4704 env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
4705 if (!env)
4706 return;
4707
4708 add_uevent_var(env, "CREATED=%llu", created);
4709 add_uevent_var(env, "COUNT=%llu", active);
4710
4711 if (type == KVM_EVENT_CREATE_VM) {
4712 add_uevent_var(env, "EVENT=create");
4713 kvm->userspace_pid = task_pid_nr(current);
4714 } else if (type == KVM_EVENT_DESTROY_VM) {
4715 add_uevent_var(env, "EVENT=destroy");
4716 }
4717 add_uevent_var(env, "PID=%d", kvm->userspace_pid);
4718
4719 if (kvm->debugfs_dentry) {
4720 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
4721
4722 if (p) {
4723 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
4724 if (!IS_ERR(tmp))
4725 add_uevent_var(env, "STATS_PATH=%s", tmp);
4726 kfree(p);
4727 }
4728 }
4729 /* no need for checks, since we are adding at most only 5 keys */
4730 env->envp[env->envp_idx++] = NULL;
4731 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
4732 kfree(env);
4733 }
4734
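/*
 * Create the kvm debugfs directory and one file per entry in the
 * debugfs_entries[] table, using stat_fops[] (vm_stat_fops or
 * vcpu_stat_fops) according to the entry's kind.
 */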
4735 static void kvm_init_debug(void)
4736 {
4737 struct kvm_stats_debugfs_item *p;
4738
4739 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
4740
4741 kvm_debugfs_num_entries = 0;
4742 for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
4743 debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
4744 kvm_debugfs_dir, (void *)(long)p->offset,
4745 stat_fops[p->kind]);
4746 }
4747 }
4748
4749 static int kvm_suspend(void)
4750 {
4751 if (kvm_usage_count)
4752 hardware_disable_nolock(NULL);
4753 return 0;
4754 }
4755
4756 static void kvm_resume(void)
4757 {
4758 if (kvm_usage_count) {
4759 #ifdef CONFIG_LOCKDEP
4760 WARN_ON(lockdep_is_held(&kvm_count_lock));
4761 #endif
4762 hardware_enable_nolock(NULL);
4763 }
4764 }
4765
4766 static struct syscore_ops kvm_syscore_ops = {
4767 .suspend = kvm_suspend,
4768 .resume = kvm_resume,
4769 };
4770
4771 static inline
4772 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
4773 {
4774 return container_of(pn, struct kvm_vcpu, preempt_notifier);
4775 }
4776
4777 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
4778 {
4779 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
4780
4781 WRITE_ONCE(vcpu->preempted, false);
4782 WRITE_ONCE(vcpu->ready, false);
4783
4784 __this_cpu_write(kvm_running_vcpu, vcpu);
4785 kvm_arch_sched_in(vcpu, cpu);
4786 kvm_arch_vcpu_load(vcpu, cpu);
4787 }
4788
4789 static void kvm_sched_out(struct preempt_notifier *pn,
4790 struct task_struct *next)
4791 {
4792 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
4793
4794 if (current->state == TASK_RUNNING) {
4795 WRITE_ONCE(vcpu->preempted, true);
4796 WRITE_ONCE(vcpu->ready, true);
4797 }
4798 kvm_arch_vcpu_put(vcpu);
4799 __this_cpu_write(kvm_running_vcpu, NULL);
4800 }
4801
4802 /**
4803 * kvm_get_running_vcpu - get the vcpu running on the current CPU.
4804 *
4805 * We can disable preemption locally around accessing the per-CPU variable,
4806 * and use the resolved vcpu pointer after enabling preemption again,
4807 * because even if the current thread is migrated to another CPU, reading
4808 * the per-CPU value later will give us the same value as we update the
4809 * per-CPU variable in the preempt notifier handlers.
4810 */
4811 struct kvm_vcpu *kvm_get_running_vcpu(void)
4812 {
4813 struct kvm_vcpu *vcpu;
4814
4815 preempt_disable();
4816 vcpu = __this_cpu_read(kvm_running_vcpu);
4817 preempt_enable();
4818
4819 return vcpu;
4820 }
4821 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
4822
4823 /**
4824 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
4825 */
4826 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
4827 {
4828 return &kvm_running_vcpu;
4829 }
4830
4831 struct kvm_cpu_compat_check {
4832 void *opaque;
4833 int *ret;
4834 };
4835
4836 static void check_processor_compat(void *data)
4837 {
4838 struct kvm_cpu_compat_check *c = data;
4839
4840 *c->ret = kvm_arch_check_processor_compat(c->opaque);
4841 }
4842
4843 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
4844 struct module *module)
4845 {
4846 struct kvm_cpu_compat_check c;
4847 int r;
4848 int cpu;
4849
4850 r = kvm_arch_init(opaque);
4851 if (r)
4852 goto out_fail;
4853
4854 /*
4855 * kvm_arch_init makes sure there's at most one caller
4856 * for architectures that support multiple implementations,
4857 * like Intel and AMD on x86.
4858 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
4859 * conflicts in case kvm is already setup for another implementation.
4860 */
4861 r = kvm_irqfd_init();
4862 if (r)
4863 goto out_irqfd;
4864
4865 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
4866 r = -ENOMEM;
4867 goto out_free_0;
4868 }
4869
4870 r = kvm_arch_hardware_setup(opaque);
4871 if (r < 0)
4872 goto out_free_1;
4873
4874 c.ret = &r;
4875 c.opaque = opaque;
4876 for_each_online_cpu(cpu) {
4877 smp_call_function_single(cpu, check_processor_compat, &c, 1);
4878 if (r < 0)
4879 goto out_free_2;
4880 }
4881
4882 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
4883 kvm_starting_cpu, kvm_dying_cpu);
4884 if (r)
4885 goto out_free_2;
4886 register_reboot_notifier(&kvm_reboot_notifier);
4887
4888 /* A kmem cache lets us meet the alignment requirements of fx_save. */
4889 if (!vcpu_align)
4890 vcpu_align = __alignof__(struct kvm_vcpu);
4891 kvm_vcpu_cache =
4892 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
4893 SLAB_ACCOUNT,
4894 offsetof(struct kvm_vcpu, arch),
4895 sizeof_field(struct kvm_vcpu, arch),
4896 NULL);
4897 if (!kvm_vcpu_cache) {
4898 r = -ENOMEM;
4899 goto out_free_3;
4900 }
4901
4902 r = kvm_async_pf_init();
4903 if (r)
4904 goto out_free;
4905
4906 kvm_chardev_ops.owner = module;
4907 kvm_vm_fops.owner = module;
4908 kvm_vcpu_fops.owner = module;
4909
4910 r = misc_register(&kvm_dev);
4911 if (r) {
4912 pr_err("kvm: misc device register failed\n");
4913 goto out_unreg;
4914 }
4915
4916 register_syscore_ops(&kvm_syscore_ops);
4917
4918 kvm_preempt_ops.sched_in = kvm_sched_in;
4919 kvm_preempt_ops.sched_out = kvm_sched_out;
4920
4921 kvm_init_debug();
4922
4923 r = kvm_vfio_ops_init();
4924 WARN_ON(r);
4925
4926 return 0;
4927
4928 out_unreg:
4929 kvm_async_pf_deinit();
4930 out_free:
4931 kmem_cache_destroy(kvm_vcpu_cache);
4932 out_free_3:
4933 unregister_reboot_notifier(&kvm_reboot_notifier);
4934 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
4935 out_free_2:
4936 kvm_arch_hardware_unsetup();
4937 out_free_1:
4938 free_cpumask_var(cpus_hardware_enabled);
4939 out_free_0:
4940 kvm_irqfd_exit();
4941 out_irqfd:
4942 kvm_arch_exit();
4943 out_fail:
4944 return r;
4945 }
4946 EXPORT_SYMBOL_GPL(kvm_init);
4947
4948 void kvm_exit(void)
4949 {
4950 debugfs_remove_recursive(kvm_debugfs_dir);
4951 misc_deregister(&kvm_dev);
4952 kmem_cache_destroy(kvm_vcpu_cache);
4953 kvm_async_pf_deinit();
4954 unregister_syscore_ops(&kvm_syscore_ops);
4955 unregister_reboot_notifier(&kvm_reboot_notifier);
4956 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
4957 on_each_cpu(hardware_disable_nolock, NULL, 1);
4958 kvm_arch_hardware_unsetup();
4959 kvm_arch_exit();
4960 kvm_irqfd_exit();
4961 free_cpumask_var(cpus_hardware_enabled);
4962 kvm_vfio_ops_exit();
4963 }
4964 EXPORT_SYMBOL_GPL(kvm_exit);
4965
4966 struct kvm_vm_worker_thread_context {
4967 struct kvm *kvm;
4968 struct task_struct *parent;
4969 struct completion init_done;
4970 kvm_vm_thread_fn_t thread_fn;
4971 uintptr_t data;
4972 int err;
4973 };
4974
4975 static int kvm_vm_worker_thread(void *context)
4976 {
4977 /*
4978 * The init_context is allocated on the stack of the parent thread, so
4979 * we have to locally copy anything that is needed beyond initialization.
4980 */
4981 struct kvm_vm_worker_thread_context *init_context = context;
4982 struct kvm *kvm = init_context->kvm;
4983 kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
4984 uintptr_t data = init_context->data;
4985 int err;
4986
4987 err = kthread_park(current);
4988 /* kthread_park(current) is never supposed to return an error */
4989 WARN_ON(err != 0);
4990 if (err)
4991 goto init_complete;
4992
4993 err = cgroup_attach_task_all(init_context->parent, current);
4994 if (err) {
4995 kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
4996 __func__, err);
4997 goto init_complete;
4998 }
4999
5000 set_user_nice(current, task_nice(init_context->parent));
5001
5002 init_complete:
5003 init_context->err = err;
5004 complete(&init_context->init_done);
5005 init_context = NULL;
5006
5007 if (err)
5008 return err;
5009
5010 /* Wait to be woken up by the spawner before proceeding. */
5011 kthread_parkme();
5012
5013 if (!kthread_should_stop())
5014 err = thread_fn(kvm, data);
5015
5016 return err;
5017 }
5018
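/*
 * Spawn a VM worker kthread and wait for it to finish the setup done in
 * kvm_vm_worker_thread() (cgroup attachment and nice-level inheritance
 * from the parent). On success the task is returned through *thread_ptr;
 * the worker parks itself and waits to be woken by its user before it
 * actually runs thread_fn.
 */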
5019 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
5020 uintptr_t data, const char *name,
5021 struct task_struct **thread_ptr)
5022 {
5023 struct kvm_vm_worker_thread_context init_context = {};
5024 struct task_struct *thread;
5025
5026 *thread_ptr = NULL;
5027 init_context.kvm = kvm;
5028 init_context.parent = current;
5029 init_context.thread_fn = thread_fn;
5030 init_context.data = data;
5031 init_completion(&init_context.init_done);
5032
5033 thread = kthread_run(kvm_vm_worker_thread, &init_context,
5034 "%s-%d", name, task_pid_nr(current));
5035 if (IS_ERR(thread))
5036 return PTR_ERR(thread);
5037
5038 /* kthread_run is never supposed to return NULL */
5039 WARN_ON(thread == NULL);
5040
5041 wait_for_completion(&init_context.init_done);
5042
5043 if (!init_context.err)
5044 *thread_ptr = thread;
5045
5046 return init_context.err;
5047 }
5048