/*
 * Support KVM guest page tracking
 *
 * This feature allows us to track page access in the guest. Currently,
 * only write access is tracked.
 *
 * Copyright(C) 2015 Intel Corporation.
 *
 * Author:
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_host.h>
#include <asm/kvm_page_track.h>

#include "mmu.h"

void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
				 struct kvm_memory_slot *dont)
{
	int i;

	for (i = 0; i < KVM_PAGE_TRACK_MAX; i++)
		if (!dont || free->arch.gfn_track[i] !=
		      dont->arch.gfn_track[i]) {
			kvfree(free->arch.gfn_track[i]);
			free->arch.gfn_track[i] = NULL;
		}
}

int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
				  unsigned long npages)
{
	int i;

	for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
		slot->arch.gfn_track[i] = kvm_kvzalloc(npages *
					    sizeof(*slot->arch.gfn_track[i]));
		if (!slot->arch.gfn_track[i])
			goto track_free;
	}

	return 0;

track_free:
	kvm_page_track_free_memslot(slot, NULL);
	return -ENOMEM;
}
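
/*
 * Illustrative sizing note, assuming the unsigned short counters
 * declared in struct kvm_arch_memory_slot: each tracking mode keeps one
 * 16-bit counter per guest page frame, so a slot backing npages pages
 * costs npages * sizeof(unsigned short) bytes per mode. For example, a
 * 1GiB slot (262144 4KiB pages) needs 512KiB per mode.
 */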

static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode)
{
	if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
		return false;

	return true;
}

static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
			     enum kvm_page_track_mode mode, short count)
{
	int index, val;

	index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);

	val = slot->arch.gfn_track[mode][index];

	if (WARN_ON(val + count < 0 || val + count > USHRT_MAX))
		return;

	slot->arch.gfn_track[mode][index] += count;
}
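
/*
 * Worked example (illustrative): at PT_PAGE_TABLE_LEVEL the hugepage
 * shift is 0, so gfn_to_index() reduces to gfn - slot->base_gfn; for a
 * slot with base_gfn 0x1000, tracking gfn 0x1005 bumps the counter at
 * index 5. The counter acts as a reference count: several trackers may
 * track the same gfn, and interception stops only once every tracker
 * has been removed.
 */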

/*
 * add the guest page to the tracking pool so that corresponding access
 * on that page will be intercepted.
 *
 * It should be called under the protection of both the mmu-lock and
 * either kvm->srcu or kvm->slots_lock.
 *
 * @kvm: the guest instance we are interested in.
 * @slot: the memory slot that @gfn belongs to.
 * @gfn: the guest page.
 * @mode: tracking mode, currently only write track is supported.
 */
void kvm_slot_page_track_add_page(struct kvm *kvm,
				  struct kvm_memory_slot *slot, gfn_t gfn,
				  enum kvm_page_track_mode mode)
{
	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return;

	update_gfn_track(slot, gfn, mode, 1);

	/*
	 * a new track stops large page mapping for the
	 * tracked page.
	 */
	kvm_mmu_gfn_disallow_lpage(slot, gfn);

	if (mode == KVM_PAGE_TRACK_WRITE)
		if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
			kvm_flush_remote_tlbs(kvm);
}

/*
 * remove the guest page from the tracking pool which stops the
 * interception of corresponding access on that page. It is the reverse
 * operation of kvm_slot_page_track_add_page().
 *
 * It should be called under the protection of both the mmu-lock and
 * either kvm->srcu or kvm->slots_lock.
 *
 * @kvm: the guest instance we are interested in.
 * @slot: the memory slot that @gfn belongs to.
 * @gfn: the guest page.
 * @mode: tracking mode, currently only write track is supported.
 */
void kvm_slot_page_track_remove_page(struct kvm *kvm,
				     struct kvm_memory_slot *slot, gfn_t gfn,
				     enum kvm_page_track_mode mode)
{
	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return;

	update_gfn_track(slot, gfn, mode, -1);

	/*
	 * allow large page mapping for the tracked page
	 * after the tracker is gone.
	 */
	kvm_mmu_gfn_allow_lpage(slot, gfn);
}
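
/*
 * Illustrative usage sketch: how a caller (e.g. a device model that
 * shadows guest page tables) might write-track a guest page and later
 * drop the tracking. example_track_gpt() is a hypothetical helper, not
 * part of this API; the slot lookup and locking follow the rules
 * documented above.
 */
#if 0
static void example_track_gpt(struct kvm *kvm, gfn_t gfn, bool track)
{
	struct kvm_memory_slot *slot;
	int idx;

	/* kvm->srcu protects the memslot lookup. */
	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (slot) {
		/* the mmu-lock protects the tracking counters. */
		spin_lock(&kvm->mmu_lock);
		if (track)
			kvm_slot_page_track_add_page(kvm, slot, gfn,
						     KVM_PAGE_TRACK_WRITE);
		else
			kvm_slot_page_track_remove_page(kvm, slot, gfn,
							KVM_PAGE_TRACK_WRITE);
		spin_unlock(&kvm->mmu_lock);
	}
	srcu_read_unlock(&kvm->srcu, idx);
}
#endif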

/*
 * check if the corresponding access on the specified guest page is tracked.
 */
bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
			      enum kvm_page_track_mode mode)
{
	struct kvm_memory_slot *slot;
	int index;

	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return false;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	if (!slot)
		return false;

	index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
	return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
}
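
/*
 * Illustrative sketch: how an MMU page-fault path can consult this
 * check. A write fault on a write-tracked gfn cannot be fixed by the
 * fault handler and must be forwarded to the emulator instead. The
 * function below is a simplified, hypothetical outline of such a check.
 */
#if 0
static bool example_fault_is_tracked_write(struct kvm_vcpu *vcpu,
					   u32 error_code, gfn_t gfn)
{
	/* only a write to a present page can hit a write-tracked gfn. */
	if (!(error_code & PFERR_PRESENT_MASK) ||
	    !(error_code & PFERR_WRITE_MASK))
		return false;

	return kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE);
}
#endif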

void kvm_page_track_cleanup(struct kvm *kvm)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;
	cleanup_srcu_struct(&head->track_srcu);
}

void kvm_page_track_init(struct kvm *kvm)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;
	init_srcu_struct(&head->track_srcu);
	INIT_HLIST_HEAD(&head->track_notifier_list);
}

/*
 * register the notifier so that event interception for the tracked guest
 * pages can be received.
 */
void
kvm_page_track_register_notifier(struct kvm *kvm,
				 struct kvm_page_track_notifier_node *n)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;

	spin_lock(&kvm->mmu_lock);
	hlist_add_head_rcu(&n->node, &head->track_notifier_list);
	spin_unlock(&kvm->mmu_lock);
}

/*
 * stop receiving the event interception. It is the reverse operation of
 * kvm_page_track_register_notifier().
 */
void
kvm_page_track_unregister_notifier(struct kvm *kvm,
				   struct kvm_page_track_notifier_node *n)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;

	spin_lock(&kvm->mmu_lock);
	hlist_del_rcu(&n->node);
	spin_unlock(&kvm->mmu_lock);
	synchronize_srcu(&head->track_srcu);
}
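
/*
 * Illustrative sketch: how a consumer might hook the notifier. The
 * example_* names are hypothetical; only the notifier-node API and the
 * track_write() signature are taken from this file and
 * <asm/kvm_page_track.h>.
 */
#if 0
static void example_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
				const u8 *new, int bytes)
{
	/* react to an emulated write that hit a write-tracked page. */
}

static struct kvm_page_track_notifier_node example_node = {
	.track_write = example_track_write,
};

static void example_start_tracking(struct kvm *kvm)
{
	kvm_page_track_register_notifier(kvm, &example_node);
}
#endif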

/*
 * Notify the node that write access is intercepted and write emulation
 * has finished at this time.
 *
 * The node should figure out by itself whether the written page is one
 * it is interested in.
 */
void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
			  int bytes)
{
	struct kvm_page_track_notifier_head *head;
	struct kvm_page_track_notifier_node *n;
	int idx;

	head = &vcpu->kvm->arch.track_notifier_head;

	if (hlist_empty(&head->track_notifier_list))
		return;

	idx = srcu_read_lock(&head->track_srcu);
	hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
		if (n->track_write)
			n->track_write(vcpu, gpa, new, bytes);
	srcu_read_unlock(&head->track_srcu, idx);
}
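
/*
 * Locking note: the notifier list is updated under the mmu-lock (see
 * kvm_page_track_register_notifier()) but walked here under SRCU, so
 * track_write() callbacks may block without holding the mmu-lock; the
 * synchronize_srcu() in kvm_page_track_unregister_notifier() then
 * guarantees no callback is still running once unregistration returns.
 */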