// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging performance test
 *
 * Based on dirty_log_test.c
 *
 * Copyright (C) 2018, Red Hat, Inc.
 * Copyright (C) 2020, Google, Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <pthread.h>
#include <linux/bitmap.h>

#include "kvm_util.h"
#include "test_util.h"
#include "perf_test_util.h"
#include "guest_modes.h"

#ifdef __aarch64__
#include "aarch64/vgic.h"

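/*
 * Guest physical addresses at which the GICv3 distributor and redistributor
 * regions are mapped for the aarch64 build of this test.
 */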
#define GICD_BASE_GPA	0x8000000ULL
#define GICR_BASE_GPA	0x80A0000ULL

static int gic_fd;

static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
{
	/*
	 * The test can still run even if hardware does not support GICv3, as it
	 * is only an optimization to reduce guest exits.
	 */
	gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
}

static void arch_cleanup_vm(struct kvm_vm *vm)
{
	if (gic_fd > 0)
		close(gic_fd);
}

#else /* __aarch64__ */

static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
{
}

static void arch_cleanup_vm(struct kvm_vm *vm)
{
}

#endif

/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop) */
#define TEST_HOST_LOOP_N	2UL

static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static bool run_vcpus_while_disabling_dirty_logging;

/* Host variables */
static u64 dirty_log_manual_caps;
static bool host_quit;
static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];

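/*
 * Per-vCPU worker: repeatedly runs the guest and records how long each pass of
 * dirtying (or, for iteration 0, populating) its memory slice takes, then
 * spins until the main thread advances the iteration counter.
 */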
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
	int vcpu_idx = vcpu_args->vcpu_idx;
	uint64_t pages_count = 0;
	struct kvm_run *run;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec total = (struct timespec){0};
	struct timespec avg;
	int ret;

	run = vcpu->run;

	while (!READ_ONCE(host_quit)) {
		int current_iteration = READ_ONCE(iteration);

		clock_gettime(CLOCK_MONOTONIC, &start);
		ret = _vcpu_run(vcpu);
		ts_diff = timespec_elapsed(start);

		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
		TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
			    "Invalid guest sync status: exit_reason=%s\n",
			    exit_reason_str(run->exit_reason));

		pr_debug("Got sync event from vCPU %d\n", vcpu_idx);
		vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
		pr_debug("vCPU %d updated last completed iteration to %d\n",
			 vcpu_idx, vcpu_last_completed_iteration[vcpu_idx]);

		if (current_iteration) {
			pages_count += vcpu_args->pages;
			total = timespec_add(total, ts_diff);
			pr_debug("vCPU %d iteration %d dirty memory time: %ld.%.9lds\n",
				 vcpu_idx, current_iteration, ts_diff.tv_sec,
				 ts_diff.tv_nsec);
		} else {
			pr_debug("vCPU %d iteration %d populate memory time: %ld.%.9lds\n",
				 vcpu_idx, current_iteration, ts_diff.tv_sec,
				 ts_diff.tv_nsec);
		}

		/*
		 * Keep running the guest while dirty logging is being disabled
		 * (iteration is negative) so that vCPUs are accessing memory
		 * for the entire duration of zapping collapsible SPTEs.
		 */
		while (current_iteration == READ_ONCE(iteration) &&
		       READ_ONCE(iteration) >= 0 && !READ_ONCE(host_quit)) {}
	}

	avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_idx]);
	pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		 vcpu_idx, pages_count, vcpu_last_completed_iteration[vcpu_idx],
		 total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
}

struct test_params {
	unsigned long iterations;
	uint64_t phys_offset;
	int wr_fract;
	bool partition_vcpu_memory_access;
	enum vm_mem_backing_src_type backing_src;
	int slots;
};

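/*
 * Dirty logging is enabled or disabled by updating the
 * KVM_MEM_LOG_DIRTY_PAGES flag on each of the test's memslots.
 */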
static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;
		int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;

		vm_mem_region_set_flags(vm, slot, flags);
	}
}

static inline void enable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, true);
}

static inline void disable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, false);
}

static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;

		kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
	}
}

static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
			    int slots, uint64_t pages_per_slot)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;

		kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
	}
}

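/* Allocate one dirty bitmap per memslot, each covering pages_per_slot pages. */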
static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot)
{
	unsigned long **bitmaps;
	int i;

	bitmaps = malloc(slots * sizeof(bitmaps[0]));
	TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");

	for (i = 0; i < slots; i++) {
		bitmaps[i] = bitmap_zalloc(pages_per_slot);
		TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
	}

	return bitmaps;
}

static void free_bitmaps(unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++)
		free(bitmaps[i]);

	free(bitmaps);
}

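/*
 * Core of the test for a single guest mode: create the VM, let the vCPUs
 * populate memory, enable dirty logging, then for each iteration measure how
 * long the vCPUs take to dirty memory and how long KVM_GET_DIRTY_LOG (and,
 * when manual protect is enabled, KVM_CLEAR_DIRTY_LOG) takes. Finally, time
 * disabling dirty logging and report per-iteration averages.
 */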
static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;
	struct kvm_vm *vm;
	unsigned long **bitmaps;
	uint64_t guest_num_pages;
	uint64_t host_num_pages;
	uint64_t pages_per_slot;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec get_dirty_log_total = (struct timespec){0};
	struct timespec vcpu_dirty_total = (struct timespec){0};
	struct timespec avg;
	struct timespec clear_dirty_log_total = (struct timespec){0};
	int i;

	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
				 p->slots, p->backing_src,
				 p->partition_vcpu_memory_access);

	perf_test_set_wr_fract(vm, p->wr_fract);

	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift;
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
	pages_per_slot = host_num_pages / p->slots;

	bitmaps = alloc_bitmaps(p->slots, pages_per_slot);

	if (dirty_log_manual_caps)
		vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
			      dirty_log_manual_caps);

	arch_setup_vm(vm, nr_vcpus);

	/* Start the iterations */
	iteration = 0;
	host_quit = false;

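	/*
	 * Time the populate pass from here. The per-vCPU completion markers
	 * are seeded with -1 so that iteration 0 only counts as done once
	 * every vCPU has synced at least once.
	 */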
	clock_gettime(CLOCK_MONOTONIC, &start);
	for (i = 0; i < nr_vcpus; i++)
		vcpu_last_completed_iteration[i] = -1;

	perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);

	/* Allow the vCPUs to populate memory */
	pr_debug("Starting iteration %d - Populating\n", iteration);
	for (i = 0; i < nr_vcpus; i++) {
		while (READ_ONCE(vcpu_last_completed_iteration[i]) !=
		       iteration)
			;
	}

	ts_diff = timespec_elapsed(start);
	pr_info("Populate memory time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	/* Enable dirty logging */
	clock_gettime(CLOCK_MONOTONIC, &start);
	enable_dirty_logging(vm, p->slots);
	ts_diff = timespec_elapsed(start);
	pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	while (iteration < p->iterations) {
		/*
		 * Incrementing the iteration number will start the vCPUs
		 * dirtying memory again.
		 */
		clock_gettime(CLOCK_MONOTONIC, &start);
		iteration++;

		pr_debug("Starting iteration %d\n", iteration);
		for (i = 0; i < nr_vcpus; i++) {
			while (READ_ONCE(vcpu_last_completed_iteration[i])
			       != iteration)
				;
		}

		ts_diff = timespec_elapsed(start);
		vcpu_dirty_total = timespec_add(vcpu_dirty_total, ts_diff);
		pr_info("Iteration %d dirty memory time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

		clock_gettime(CLOCK_MONOTONIC, &start);
		get_dirty_log(vm, bitmaps, p->slots);
		ts_diff = timespec_elapsed(start);
		get_dirty_log_total = timespec_add(get_dirty_log_total,
						   ts_diff);
		pr_info("Iteration %d get dirty log time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

		if (dirty_log_manual_caps) {
			clock_gettime(CLOCK_MONOTONIC, &start);
			clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot);
			ts_diff = timespec_elapsed(start);
			clear_dirty_log_total = timespec_add(clear_dirty_log_total,
							     ts_diff);
			pr_info("Iteration %d clear dirty log time: %ld.%.9lds\n",
				iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
		}
	}

	/*
	 * Run vCPUs while dirty logging is being disabled to stress disabling
	 * in terms of both performance and correctness. Opt-in via command
	 * line as this significantly increases time to disable dirty logging.
	 */
	if (run_vcpus_while_disabling_dirty_logging)
		WRITE_ONCE(iteration, -1);

	/* Disable dirty logging */
	clock_gettime(CLOCK_MONOTONIC, &start);
	disable_dirty_logging(vm, p->slots);
	ts_diff = timespec_elapsed(start);
	pr_info("Disabling dirty logging time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	/*
	 * Tell the vCPU threads to quit. No need to manually check that vCPUs
	 * have stopped running after disabling dirty logging, the join will
	 * wait for them to exit.
	 */
	host_quit = true;
	perf_test_join_vcpu_threads(nr_vcpus);

	avg = timespec_div(get_dirty_log_total, p->iterations);
	pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		p->iterations, get_dirty_log_total.tv_sec,
		get_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);

	if (dirty_log_manual_caps) {
		avg = timespec_div(clear_dirty_log_total, p->iterations);
		pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
			p->iterations, clear_dirty_log_total.tv_sec,
			clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
	}

	free_bitmaps(bitmaps, p->slots);
	arch_cleanup_vm(vm);
	perf_test_destroy_vm(vm);
}

static void help(char *name)
{
	puts("");
354 printf("usage: %s [-h] [-i iterations] [-p offset] [-g] "
355 "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type]"
356 "[-x memslots]\n", name);
	puts("");
	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
	       TEST_HOST_LOOP_N);
	printf(" -g: Do not enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. This\n"
	       "     makes KVM_GET_DIRTY_LOG clear the dirty log (i.e.\n"
	       "     KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE is not enabled)\n"
	       "     and writes will be tracked as soon as dirty logging is\n"
	       "     enabled on the memslot (i.e. KVM_DIRTY_LOG_INITIALLY_SET\n"
	       "     is not enabled).\n");
	printf(" -p: specify guest physical test memory offset\n"
	       "     Warning: a low offset can conflict with the loaded test code.\n");
	guest_modes_help();
	printf(" -n: Run the vCPUs in nested mode (L2)\n");
	printf(" -e: Run vCPUs while dirty logging is being disabled. This\n"
	       "     can significantly increase runtime, especially if there\n"
	       "     isn't a dedicated pCPU for the main thread.\n");
	printf(" -b: specify the size of the memory region which should be\n"
	       "     dirtied by each vCPU. e.g. 10M or 3G.\n"
	       "     (default: 1G)\n");
	printf(" -f: specify the fraction of pages which should be written to\n"
	       "     as opposed to simply read, in the form\n"
	       "     1/<fraction of pages to write>.\n"
	       "     (default: 1 i.e. all pages are written to.)\n");
	printf(" -v: specify the number of vCPUs to run.\n");
	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
	       "     them into a separate region of memory for each vCPU.\n");
	backing_src_help("-s");
	printf(" -x: Split the memory region into this number of memslots.\n"
	       "     (default: 1)\n");
	puts("");
	exit(0);
}

int main(int argc, char *argv[])
{
	int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
	struct test_params p = {
		.iterations = TEST_HOST_LOOP_N,
		.wr_fract = 1,
		.partition_vcpu_memory_access = true,
		.backing_src = DEFAULT_VM_MEM_SRC,
		.slots = 1,
	};
	int opt;

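	/*
	 * Query KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and keep only the two
	 * manual-protect bits this test knows how to use. If the capability
	 * is absent, the test falls back to plain KVM_GET_DIRTY_LOG behavior.
	 */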
	dirty_log_manual_caps =
		kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
	dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
				  KVM_DIRTY_LOG_INITIALLY_SET);

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "eghi:p:m:nb:f:v:os:x:")) != -1) {
		switch (opt) {
		case 'e':
			/* 'e' is for evil. */
			run_vcpus_while_disabling_dirty_logging = true;
			break;
		case 'g':
			dirty_log_manual_caps = 0;
			break;
		case 'i':
			p.iterations = atoi(optarg);
			break;
		case 'p':
			p.phys_offset = strtoull(optarg, NULL, 0);
			break;
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'n':
			perf_test_args.nested = true;
			break;
		case 'b':
			guest_percpu_mem_size = parse_size(optarg);
			break;
		case 'f':
			p.wr_fract = atoi(optarg);
			TEST_ASSERT(p.wr_fract >= 1,
				    "Write fraction cannot be less than one");
			break;
		case 'v':
			nr_vcpus = atoi(optarg);
			TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
				    "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
			break;
		case 'o':
			p.partition_vcpu_memory_access = false;
			break;
		case 's':
			p.backing_src = parse_backing_src_type(optarg);
			break;
		case 'x':
			p.slots = atoi(optarg);
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");

	pr_info("Test iterations: %"PRIu64"\n", p.iterations);

	for_each_guest_mode(run_test, &p);

	return 0;
}