// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging performance test
 *
 * Based on dirty_log_test.c
 *
 * Copyright (C) 2018, Red Hat, Inc.
 * Copyright (C) 2020, Google, Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <pthread.h>
#include <linux/bitmap.h>

#include "kvm_util.h"
#include "test_util.h"
#include "memstress.h"
#include "guest_modes.h"

#ifdef __aarch64__
#include "aarch64/vgic.h"

#define GICD_BASE_GPA 0x8000000ULL
#define GICR_BASE_GPA 0x80A0000ULL

static int gic_fd;

static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
{
	/*
	 * The test can still run even if hardware does not support GICv3, as it
	 * is only an optimization to reduce guest exits.
	 */
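	/*
	 * The third argument is the number of IRQs. If GICv3 is unsupported,
	 * the returned fd should be negative and the test simply proceeds
	 * without a vGIC (see the gic_fd check in arch_cleanup_vm()).
	 */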
	gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
}

static void arch_cleanup_vm(struct kvm_vm *vm)
{
	if (gic_fd > 0)
		close(gic_fd);
}

#else /* __aarch64__ */

static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
{
}

static void arch_cleanup_vm(struct kvm_vm *vm)
{
}

#endif

/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop) */
#define TEST_HOST_LOOP_N 2UL

static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static bool run_vcpus_while_disabling_dirty_logging;

/* Host variables */
static u64 dirty_log_manual_caps;
static bool host_quit;
static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];

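/*
 * Per-vCPU thread body. Each guest pass over its memory region ends with a
 * UCALL_SYNC exit; the host side records the completed pass in
 * vcpu_last_completed_iteration[] and then spins until the main thread
 * advances "iteration" (or sets host_quit, or makes iteration negative).
 */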
static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
	int vcpu_idx = vcpu_args->vcpu_idx;
	uint64_t pages_count = 0;
	struct kvm_run *run;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec total = (struct timespec){0};
	struct timespec avg;
	int ret;

	run = vcpu->run;

	while (!READ_ONCE(host_quit)) {
		int current_iteration = READ_ONCE(iteration);

		clock_gettime(CLOCK_MONOTONIC, &start);
		ret = _vcpu_run(vcpu);
		ts_diff = timespec_elapsed(start);

		TEST_ASSERT(ret == 0, "vcpu_run failed: %d", ret);
		TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
			    "Invalid guest sync status: exit_reason=%s",
			    exit_reason_str(run->exit_reason));

		pr_debug("Got sync event from vCPU %d\n", vcpu_idx);
		vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
		pr_debug("vCPU %d updated last completed iteration to %d\n",
			 vcpu_idx, vcpu_last_completed_iteration[vcpu_idx]);

		if (current_iteration) {
			pages_count += vcpu_args->pages;
			total = timespec_add(total, ts_diff);
			pr_debug("vCPU %d iteration %d dirty memory time: %ld.%.9lds\n",
				 vcpu_idx, current_iteration, ts_diff.tv_sec,
				 ts_diff.tv_nsec);
		} else {
			pr_debug("vCPU %d iteration %d populate memory time: %ld.%.9lds\n",
				 vcpu_idx, current_iteration, ts_diff.tv_sec,
				 ts_diff.tv_nsec);
		}

		/*
		 * Keep running the guest while dirty logging is being disabled
		 * (iteration is negative) so that vCPUs are accessing memory
		 * for the entire duration of zapping collapsible SPTEs.
		 */
		while (current_iteration == READ_ONCE(iteration) &&
		       READ_ONCE(iteration) >= 0 && !READ_ONCE(host_quit)) {}
	}

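	/*
	 * The populate pass (iteration 0) was skipped by the accounting above,
	 * so the average below covers only the dirty-memory iterations.
	 */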
	avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_idx]);
	pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		 vcpu_idx, pages_count, vcpu_last_completed_iteration[vcpu_idx],
		 total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
}

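/* Command-line configuration for a single run; see help() for the flags. */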
struct test_params {
	unsigned long iterations;
	uint64_t phys_offset;
	bool partition_vcpu_memory_access;
	enum vm_mem_backing_src_type backing_src;
	int slots;
	uint32_t write_percent;
	uint32_t random_seed;
	bool random_access;
};

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;
	struct kvm_vm *vm;
	unsigned long **bitmaps;
	uint64_t guest_num_pages;
	uint64_t host_num_pages;
	uint64_t pages_per_slot;
	struct timespec start;
	struct timespec ts_diff;
	struct timespec get_dirty_log_total = (struct timespec){0};
	struct timespec vcpu_dirty_total = (struct timespec){0};
	struct timespec avg;
	struct timespec clear_dirty_log_total = (struct timespec){0};
	int i;

	vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
				 p->slots, p->backing_src,
				 p->partition_vcpu_memory_access);

	pr_info("Random seed: %u\n", p->random_seed);
	memstress_set_random_seed(vm, p->random_seed);
	memstress_set_write_percent(vm, p->write_percent);

	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift;
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
	pages_per_slot = host_num_pages / p->slots;

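	/* One dirty bitmap per memslot, pages_per_slot bits in each. */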
	bitmaps = memstress_alloc_bitmaps(p->slots, pages_per_slot);

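	/*
	 * If supported, enable manual dirty-log clearing so that
	 * KVM_GET_DIRTY_LOG does not implicitly clear the dirty log and
	 * KVM_CLEAR_DIRTY_LOG can be timed separately (see the -g option).
	 */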
	if (dirty_log_manual_caps)
		vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
			      dirty_log_manual_caps);

	arch_setup_vm(vm, nr_vcpus);

	/* Start the iterations */
	iteration = 0;
	host_quit = false;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (i = 0; i < nr_vcpus; i++)
		vcpu_last_completed_iteration[i] = -1;

	/*
	 * Use 100% writes during the population phase to ensure all
	 * memory is actually populated and not just mapped to the zero
	 * page. This prevents expensive copy-on-write faults from
	 * occurring during the dirty memory iterations below, which
	 * would pollute the performance results.
	 */
	memstress_set_write_percent(vm, 100);
	memstress_set_random_access(vm, false);
	memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);

	/* Allow the vCPUs to populate memory */
	pr_debug("Starting iteration %d - Populating\n", iteration);
	for (i = 0; i < nr_vcpus; i++) {
		while (READ_ONCE(vcpu_last_completed_iteration[i]) !=
		       iteration)
			;
	}

	ts_diff = timespec_elapsed(start);
	pr_info("Populate memory time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	/* Enable dirty logging */
	clock_gettime(CLOCK_MONOTONIC, &start);
	memstress_enable_dirty_logging(vm, p->slots);
	ts_diff = timespec_elapsed(start);
	pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	memstress_set_write_percent(vm, p->write_percent);
	memstress_set_random_access(vm, p->random_access);

	while (iteration < p->iterations) {
		/*
		 * Incrementing the iteration number will start the vCPUs
		 * dirtying memory again.
		 */
		clock_gettime(CLOCK_MONOTONIC, &start);
		iteration++;

		pr_debug("Starting iteration %d\n", iteration);
		for (i = 0; i < nr_vcpus; i++) {
			while (READ_ONCE(vcpu_last_completed_iteration[i])
			       != iteration)
				;
		}

		ts_diff = timespec_elapsed(start);
		vcpu_dirty_total = timespec_add(vcpu_dirty_total, ts_diff);
		pr_info("Iteration %d dirty memory time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

		clock_gettime(CLOCK_MONOTONIC, &start);
		memstress_get_dirty_log(vm, bitmaps, p->slots);
		ts_diff = timespec_elapsed(start);
		get_dirty_log_total = timespec_add(get_dirty_log_total,
						   ts_diff);
		pr_info("Iteration %d get dirty log time: %ld.%.9lds\n",
			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

		if (dirty_log_manual_caps) {
			clock_gettime(CLOCK_MONOTONIC, &start);
			memstress_clear_dirty_log(vm, bitmaps, p->slots,
						  pages_per_slot);
			ts_diff = timespec_elapsed(start);
			clear_dirty_log_total = timespec_add(clear_dirty_log_total,
							     ts_diff);
			pr_info("Iteration %d clear dirty log time: %ld.%.9lds\n",
				iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
		}
	}

	/*
	 * Run vCPUs while dirty logging is being disabled to stress disabling
	 * in terms of both performance and correctness. Opt-in via command
	 * line as this significantly increases time to disable dirty logging.
	 */
	if (run_vcpus_while_disabling_dirty_logging)
		WRITE_ONCE(iteration, -1);

	/* Disable dirty logging */
	clock_gettime(CLOCK_MONOTONIC, &start);
	memstress_disable_dirty_logging(vm, p->slots);
	ts_diff = timespec_elapsed(start);
	pr_info("Disabling dirty logging time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	/*
	 * Tell the vCPU threads to quit. No need to manually check that vCPUs
	 * have stopped running after disabling dirty logging, the join will
	 * wait for them to exit.
	 */
	host_quit = true;
	memstress_join_vcpu_threads(nr_vcpus);

	avg = timespec_div(get_dirty_log_total, p->iterations);
	pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		p->iterations, get_dirty_log_total.tv_sec,
		get_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);

	if (dirty_log_manual_caps) {
		avg = timespec_div(clear_dirty_log_total, p->iterations);
		pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
			p->iterations, clear_dirty_log_total.tv_sec,
			clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
	}

	memstress_free_bitmaps(bitmaps, p->slots);
	arch_cleanup_vm(vm);
	memstress_destroy_vm(vm);
}

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-a] [-e] [-i iterations] [-p offset] [-g] "
	       "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-r random seed] [-s mem type] "
	       "[-x memslots] [-w percentage] [-c physical cpus to run test on]\n", name);
	puts("");
	printf(" -a: access memory randomly rather than in order.\n");
	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
	       TEST_HOST_LOOP_N);
	printf(" -g: Do not enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. This\n"
	       "     makes KVM_GET_DIRTY_LOG clear the dirty log (i.e.\n"
	       "     KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE is not enabled)\n"
	       "     and writes will be tracked as soon as dirty logging is\n"
	       "     enabled on the memslot (i.e. KVM_DIRTY_LOG_INITIALLY_SET\n"
	       "     is not enabled).\n");
	printf(" -p: specify guest physical test memory offset\n"
	       "     Warning: a low offset can conflict with the loaded test code.\n");
	guest_modes_help();
	printf(" -n: Run the vCPUs in nested mode (L2)\n");
	printf(" -e: Run vCPUs while dirty logging is being disabled. This\n"
	       "     can significantly increase runtime, especially if there\n"
	       "     isn't a dedicated pCPU for the main thread.\n");
	printf(" -b: specify the size of the memory region which should be\n"
	       "     dirtied by each vCPU. e.g. 10M or 3G.\n"
	       "     (default: 1G)\n");
	printf(" -v: specify the number of vCPUs to run.\n");
	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
	       "     them into a separate region of memory for each vCPU.\n");
	printf(" -r: specify the starting random seed.\n");
	backing_src_help("-s");
	printf(" -x: Split the memory region into this number of memslots.\n"
	       "     (default: 1)\n");
	printf(" -w: specify the percentage of pages which should be written to\n"
	       "     as an integer from 0-100 inclusive. This is probabilistic,\n"
	       "     so -w X means each page has an X%% chance of writing\n"
	       "     and a (100-X)%% chance of reading.\n"
	       "     (default: 100 i.e. all pages are written to.)\n");
	kvm_print_vcpu_pinning_help();
	puts("");
	exit(0);
}

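/*
 * Example invocations (values are illustrative only):
 *
 *   ./dirty_log_perf_test                    # defaults: 1 vCPU, 1G, 2 iterations
 *   ./dirty_log_perf_test -v 4 -b 512M -i 10
 *   ./dirty_log_perf_test -g -x 8 -w 50      # no manual clearing, 8 slots, 50% writes
 */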
int main(int argc, char *argv[])
{
	int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
	const char *pcpu_list = NULL;
	struct test_params p = {
		.iterations = TEST_HOST_LOOP_N,
		.partition_vcpu_memory_access = true,
		.backing_src = DEFAULT_VM_MEM_SRC,
		.slots = 1,
		.random_seed = 1,
		.write_percent = 100,
	};
	int opt;

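	/*
	 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 reports a bitmask of supported
	 * flags; keep only the two this test knows how to exercise.
	 */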
	dirty_log_manual_caps =
		kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
	dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
				  KVM_DIRTY_LOG_INITIALLY_SET);

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "ab:c:eghi:m:nop:r:s:v:x:w:")) != -1) {
		switch (opt) {
		case 'a':
			p.random_access = true;
			break;
		case 'b':
			guest_percpu_mem_size = parse_size(optarg);
			break;
		case 'c':
			pcpu_list = optarg;
			break;
		case 'e':
			/* 'e' is for evil. */
			run_vcpus_while_disabling_dirty_logging = true;
			break;
		case 'g':
			dirty_log_manual_caps = 0;
			break;
		case 'h':
			help(argv[0]);
			break;
		case 'i':
			p.iterations = atoi_positive("Number of iterations", optarg);
			break;
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'n':
			memstress_args.nested = true;
			break;
		case 'o':
			p.partition_vcpu_memory_access = false;
			break;
		case 'p':
			p.phys_offset = strtoull(optarg, NULL, 0);
			break;
		case 'r':
			p.random_seed = atoi_positive("Random seed", optarg);
			break;
		case 's':
			p.backing_src = parse_backing_src_type(optarg);
			break;
		case 'v':
			nr_vcpus = atoi_positive("Number of vCPUs", optarg);
			TEST_ASSERT(nr_vcpus <= max_vcpus,
				    "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
			break;
		case 'w':
			p.write_percent = atoi_non_negative("Write percentage", optarg);
			TEST_ASSERT(p.write_percent <= 100,
				    "Write percentage must be between 0 and 100");
			break;
		case 'x':
			p.slots = atoi_positive("Number of slots", optarg);
			break;
		default:
			help(argv[0]);
			break;
		}
	}

	if (pcpu_list) {
		kvm_parse_vcpu_pinning(pcpu_list, memstress_args.vcpu_to_pcpu,
				       nr_vcpus);
		memstress_args.pin_vcpus = true;
	}

	TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");

	pr_info("Test iterations: %"PRIu64"\n", p.iterations);

	for_each_guest_mode(run_test, &p);

	return 0;
}