// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <pthread.h>
#include <sched.h>
#include <semaphore.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/compiler.h>

#include <test_util.h>
#include <kvm_util.h>
#include <processor.h>

/*
 * s390x needs at least 1MB alignment, and the x86_64 MOVE/DELETE tests need a
 * 2MB sized and aligned region so that the initial region corresponds to
 * exactly one large page.
 */
#define MEM_REGION_SIZE 0x200000

#ifdef __x86_64__
/*
 * Somewhat arbitrary location and slot, intended to not overlap anything.
 */
#define MEM_REGION_GPA 0xc0000000
#define MEM_REGION_SLOT 10

static const uint64_t MMIO_VAL = 0xbeefull;

extern const uint64_t final_rip_start;
extern const uint64_t final_rip_end;

static sem_t vcpu_ready;

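/*
 * Spin until the value at MEM_REGION_GPA changes from @spin_val, then sync
 * with the host and return whatever value was observed.
 */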
static inline uint64_t guest_spin_on_val(uint64_t spin_val)
{
	uint64_t val;

	do {
		val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA));
	} while (val == spin_val);

	GUEST_SYNC(0);
	return val;
}

static void *vcpu_worker(void *data)
{
	struct kvm_vcpu *vcpu = data;
	struct kvm_run *run = vcpu->run;
	struct ucall uc;
	uint64_t cmd;

	/*
	 * Loop until the guest is done.  Re-enter the guest on all MMIO exits,
	 * which will occur if the guest attempts to access a memslot after it
	 * has been deleted or while it is being moved.
	 */
	while (1) {
		vcpu_run(vcpu);

		if (run->exit_reason == KVM_EXIT_IO) {
			cmd = get_ucall(vcpu, &uc);
			if (cmd != UCALL_SYNC)
				break;

			sem_post(&vcpu_ready);
			continue;
		}

		if (run->exit_reason != KVM_EXIT_MMIO)
			break;

		TEST_ASSERT(!run->mmio.is_write, "Unexpected exit mmio write");
		TEST_ASSERT(run->mmio.len == 8,
			    "Unexpected exit mmio size = %u", run->mmio.len);

		TEST_ASSERT(run->mmio.phys_addr == MEM_REGION_GPA,
			    "Unexpected exit mmio address = 0x%llx",
			    run->mmio.phys_addr);
		memcpy(run->mmio.data, &MMIO_VAL, 8);
	}

	if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
		REPORT_GUEST_ASSERT(uc);

	return NULL;
}

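/*
 * Wait for the vCPU thread to signal (via UCALL_SYNC) that it has entered the
 * guest, timing out after two seconds so a wedged vCPU fails the test instead
 * of hanging it.
 */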
static void wait_for_vcpu(void)
{
	struct timespec ts;

	TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
		    "clock_gettime() failed: %d", errno);

	ts.tv_sec += 2;
	TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
		    "sem_timedwait() failed: %d", errno);

	/* Wait for the vCPU thread to reenter the guest. */
	usleep(100000);
}

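/*
 * Create a VM with a single vCPU, add the test memslot backed by THP, map and
 * zero the pages the guest will poll, and spin up the vCPU worker thread.
 */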
static struct kvm_vm *spawn_vm(struct kvm_vcpu **vcpu, pthread_t *vcpu_thread,
			       void *guest_code)
{
	struct kvm_vm *vm;
	uint64_t *hva;
	uint64_t gpa;

	vm = vm_create_with_one_vcpu(vcpu, guest_code);

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
				    MEM_REGION_GPA, MEM_REGION_SLOT,
				    MEM_REGION_SIZE / getpagesize(), 0);

	/*
	 * Allocate and map two pages so that the GPA accessed by guest_code()
	 * stays valid across the memslot move.
	 */
	gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
	TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc");

	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2);

	/* Ditto for the host mapping so that both pages can be zeroed. */
	hva = addr_gpa2hva(vm, MEM_REGION_GPA);
	memset(hva, 0, 2 * 4096);

	pthread_create(vcpu_thread, NULL, vcpu_worker, *vcpu);

	/* Ensure the guest thread is spun up. */
	wait_for_vcpu();

	return vm;
}

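/*
 * Guest code for the MOVE test: poll the value at MEM_REGION_GPA while the
 * host repeatedly moves the backing memslot, verifying that every observed
 * value is one the current memslot state can legitimately produce.
 */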
static void guest_code_move_memory_region(void)
{
	uint64_t val;

	GUEST_SYNC(0);

	/*
	 * Spin until the memory region starts getting moved to a
	 * misaligned address.
	 * Every region move may or may not trigger MMIO, as the
	 * window where the memslot is invalid is usually quite small.
	 */
	val = guest_spin_on_val(0);
	__GUEST_ASSERT(val == 1 || val == MMIO_VAL,
		       "Expected '1' or MMIO ('%lx'), got '%lx'", MMIO_VAL, val);

	/* Spin until the misaligning memory region move completes. */
	val = guest_spin_on_val(MMIO_VAL);
	__GUEST_ASSERT(val == 1 || val == 0,
		       "Expected '0' or '1' (no MMIO), got '%lx'", val);

	/* Spin until the memory region starts to get re-aligned. */
	val = guest_spin_on_val(0);
	__GUEST_ASSERT(val == 1 || val == MMIO_VAL,
		       "Expected '1' or MMIO ('%lx'), got '%lx'", MMIO_VAL, val);

	/* Spin until the re-aligning memory region move completes. */
	val = guest_spin_on_val(MMIO_VAL);
	GUEST_ASSERT_EQ(val, 1);

	GUEST_DONE();
}

static void test_move_memory_region(void)
{
	pthread_t vcpu_thread;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	uint64_t *hva;

	vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_move_memory_region);

	hva = addr_gpa2hva(vm, MEM_REGION_GPA);

	/*
	 * Shift the region's base GPA.  The guest should not see "2" as the
	 * hva->gpa translation is misaligned, i.e. the guest is accessing a
	 * different host pfn.
	 */
	vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA - 4096);
	WRITE_ONCE(*hva, 2);

	/*
	 * The guest _might_ see an invalid memslot and trigger MMIO, but it's
	 * a tiny window.  Spin and defer the sync until the memslot is
	 * restored and guest behavior is once again deterministic.
	 */
	usleep(100000);

	/*
	 * Note, value in memory needs to be changed *before* restoring the
	 * memslot, else the guest could race the update and see "2".
	 */
	WRITE_ONCE(*hva, 1);

	/* Restore the original base, the guest should see "1". */
	vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA);
	wait_for_vcpu();
	/* Deferred sync from when the memslot was misaligned (above). */
	wait_for_vcpu();

	pthread_join(vcpu_thread, NULL);

	kvm_vm_free(vm);
}

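/*
 * Guest code for the DELETE test: reads of MEM_REGION_GPA must return
 * MMIO_VAL while the memslot is deleted, and the (zero-filled) memory
 * contents once it is recreated.
 */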
static void guest_code_delete_memory_region(void)
{
	uint64_t val;

	GUEST_SYNC(0);

	/* Spin until the memory region is deleted. */
	val = guest_spin_on_val(0);
	GUEST_ASSERT_EQ(val, MMIO_VAL);

	/* Spin until the memory region is recreated. */
	val = guest_spin_on_val(MMIO_VAL);
	GUEST_ASSERT_EQ(val, 0);

	/* Spin until the memory region is deleted. */
	val = guest_spin_on_val(0);
	GUEST_ASSERT_EQ(val, MMIO_VAL);

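	/*
	 * Stash the address of the spin loop below in .rodata via .pushsection
	 * so that no data is emitted into the instruction stream; the host
	 * uses the [final_rip_start, final_rip_end) range to sanity check the
	 * guest's RIP after the code memslot is yanked out from under it.
	 */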
	asm("1:\n\t"
	    ".pushsection .rodata\n\t"
	    ".global final_rip_start\n\t"
	    "final_rip_start: .quad 1b\n\t"
	    ".popsection");

	/* Spin indefinitely (until the code memslot is deleted). */
	guest_spin_on_val(MMIO_VAL);

	asm("1:\n\t"
	    ".pushsection .rodata\n\t"
	    ".global final_rip_end\n\t"
	    "final_rip_end: .quad 1b\n\t"
	    ".popsection");

	GUEST_ASSERT(0);
}

static void test_delete_memory_region(void)
{
	pthread_t vcpu_thread;
	struct kvm_vcpu *vcpu;
	struct kvm_regs regs;
	struct kvm_run *run;
	struct kvm_vm *vm;

	vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_delete_memory_region);

	/* Delete the memory region, the guest should not die. */
	vm_mem_region_delete(vm, MEM_REGION_SLOT);
	wait_for_vcpu();

	/* Recreate the memory region.  The guest should see "0". */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
				    MEM_REGION_GPA, MEM_REGION_SLOT,
				    MEM_REGION_SIZE / getpagesize(), 0);
	wait_for_vcpu();

	/* Delete the region again so that there's only one memslot left. */
	vm_mem_region_delete(vm, MEM_REGION_SLOT);
	wait_for_vcpu();

	/*
	 * Delete the primary memslot.  This should cause an emulation error
	 * or shutdown due to the page tables getting nuked.
	 */
	vm_mem_region_delete(vm, 0);

	pthread_join(vcpu_thread, NULL);

	run = vcpu->run;

	TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN ||
		    run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
		    "Unexpected exit reason = %d", run->exit_reason);

	vcpu_regs_get(vcpu, &regs);

	/*
	 * On AMD, after KVM_EXIT_SHUTDOWN the VMCB has been reinitialized
	 * already, so the instruction pointer would point to the reset vector.
	 */
	if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR)
		TEST_ASSERT(regs.rip >= final_rip_start &&
			    regs.rip < final_rip_end,
			    "Bad rip, expected 0x%lx - 0x%lx, got 0x%llx",
			    final_rip_start, final_rip_end, regs.rip);

	kvm_vm_free(vm);
}

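/*
 * Running a vCPU in a VM with no memslots whatsoever must not crash or hang
 * the host; KVM is expected to fail the run with an internal error.
 */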
static void test_zero_memory_regions(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	pr_info("Testing KVM_RUN with zero added memory regions\n");

	vm = vm_create_barebones();
	vcpu = __vm_vcpu_add(vm, 0);

	vm_ioctl(vm, KVM_SET_NR_MMU_PAGES, (void *)64ul);
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);

	kvm_vm_free(vm);
}
#endif /* __x86_64__ */

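/*
 * Verify that KVM_SET_USER_MEMORY_REGION and KVM_SET_USER_MEMORY_REGION2
 * reject unsupported and invalid flag combinations with EINVAL.
 */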
static void test_invalid_memory_region_flags(void)
{
	uint32_t supported_flags = KVM_MEM_LOG_DIRTY_PAGES;
	const uint32_t v2_only_flags = KVM_MEM_GUEST_MEMFD;
	struct kvm_vm *vm;
	int r, i;

#if defined __aarch64__ || defined __riscv || defined __x86_64__
	supported_flags |= KVM_MEM_READONLY;
#endif

#ifdef __x86_64__
	if (kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM))
		vm = vm_create_barebones_protected_vm();
	else
#endif
		vm = vm_create_barebones();

	if (kvm_check_cap(KVM_CAP_MEMORY_ATTRIBUTES) & KVM_MEMORY_ATTRIBUTE_PRIVATE)
		supported_flags |= KVM_MEM_GUEST_MEMFD;

	for (i = 0; i < 32; i++) {
		if ((supported_flags & BIT(i)) && !(v2_only_flags & BIT(i)))
			continue;

		r = __vm_set_user_memory_region(vm, 0, BIT(i),
						0, MEM_REGION_SIZE, NULL);

		TEST_ASSERT(r && errno == EINVAL,
			    "KVM_SET_USER_MEMORY_REGION should have failed on v2 only flag 0x%lx", BIT(i));

		if (supported_flags & BIT(i))
			continue;

		r = __vm_set_user_memory_region2(vm, 0, BIT(i),
						 0, MEM_REGION_SIZE, NULL, 0, 0);
		TEST_ASSERT(r && errno == EINVAL,
			    "KVM_SET_USER_MEMORY_REGION2 should have failed on unsupported flag 0x%lx", BIT(i));
	}

	if (supported_flags & KVM_MEM_GUEST_MEMFD) {
		int guest_memfd = vm_create_guest_memfd(vm, MEM_REGION_SIZE, 0);

		r = __vm_set_user_memory_region2(vm, 0,
						 KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_GUEST_MEMFD,
						 0, MEM_REGION_SIZE, NULL, guest_memfd, 0);
		TEST_ASSERT(r && errno == EINVAL,
			    "KVM_SET_USER_MEMORY_REGION2 should have failed, dirty logging private memory is unsupported");

		r = __vm_set_user_memory_region2(vm, 0,
						 KVM_MEM_READONLY | KVM_MEM_GUEST_MEMFD,
						 0, MEM_REGION_SIZE, NULL, guest_memfd, 0);
		TEST_ASSERT(r && errno == EINVAL,
			    "KVM_SET_USER_MEMORY_REGION2 should have failed, read-only GUEST_MEMFD memslots are unsupported");

		close(guest_memfd);
	}
}

/*
 * Test that memory slots can be added up to KVM_CAP_NR_MEMSLOTS, and that
 * any attempt to add further slots fails.
 */
static void test_add_max_memory_regions(void)
{
	int ret;
	struct kvm_vm *vm;
	uint32_t max_mem_slots;
	uint32_t slot;
	void *mem, *mem_aligned, *mem_extra;
	size_t alignment;

#ifdef __s390x__
	/* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
	alignment = 0x100000;
#else
	alignment = 1;
#endif

	max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
	TEST_ASSERT(max_mem_slots > 0,
		    "KVM_CAP_NR_MEMSLOTS should be greater than 0");
	pr_info("Allowed number of memory slots: %i\n", max_mem_slots);

	vm = vm_create_barebones();

	/* Check that memory slots can be added up to the maximum allowed. */
	pr_info("Adding slots 0..%i, each memory region with %dK size\n",
		(max_mem_slots - 1), MEM_REGION_SIZE >> 10);

	mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
		   PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
	TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
	mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));

	for (slot = 0; slot < max_mem_slots; slot++)
		vm_set_user_memory_region(vm, slot, 0,
					  ((uint64_t)slot * MEM_REGION_SIZE),
					  MEM_REGION_SIZE,
					  mem_aligned + (uint64_t)slot * MEM_REGION_SIZE);

	/* Check that adding a memory slot beyond the limit fails. */
	mem_extra = mmap(NULL, MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	TEST_ASSERT(mem_extra != MAP_FAILED, "Failed to mmap() host");

	ret = __vm_set_user_memory_region(vm, max_mem_slots, 0,
					  (uint64_t)max_mem_slots * MEM_REGION_SIZE,
					  MEM_REGION_SIZE, mem_extra);
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "Adding one more memory slot should fail with EINVAL");

	munmap(mem, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment);
	munmap(mem_extra, MEM_REGION_SIZE);
	kvm_vm_free(vm);
}

#ifdef __x86_64__
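/*
 * Attempt to bind @memfd at @offset to the test memslot; the caller expects
 * the binding to be rejected with EINVAL.
 */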
static void test_invalid_guest_memfd(struct kvm_vm *vm, int memfd,
				     size_t offset, const char *msg)
{
	int r = __vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
					     MEM_REGION_GPA, MEM_REGION_SIZE,
					     0, memfd, offset);
	TEST_ASSERT(r == -1 && errno == EINVAL, "%s", msg);
}

457 | |
458 | static void test_add_private_memory_region(void) |
459 | { |
460 | struct kvm_vm *vm, *vm2; |
461 | int memfd, i; |
462 | |
463 | pr_info("Testing ADD of KVM_MEM_GUEST_MEMFD memory regions\n" ); |
464 | |
465 | vm = vm_create_barebones_protected_vm(); |
466 | |
467 | test_invalid_guest_memfd(vm, memfd: vm->kvm_fd, offset: 0, msg: "KVM fd should fail" ); |
468 | test_invalid_guest_memfd(vm, memfd: vm->fd, offset: 0, msg: "VM's fd should fail" ); |
469 | |
470 | memfd = kvm_memfd_alloc(MEM_REGION_SIZE, false); |
471 | test_invalid_guest_memfd(vm, memfd, offset: 0, msg: "Regular memfd() should fail" ); |
472 | close(memfd); |
473 | |
474 | vm2 = vm_create_barebones_protected_vm(); |
475 | memfd = vm_create_guest_memfd(vm2, MEM_REGION_SIZE, 0); |
476 | test_invalid_guest_memfd(vm, memfd, offset: 0, msg: "Other VM's guest_memfd() should fail" ); |
477 | |
478 | vm_set_user_memory_region2(vm2, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD, |
479 | MEM_REGION_GPA, MEM_REGION_SIZE, 0, memfd, 0); |
480 | close(memfd); |
481 | kvm_vm_free(vm2); |
482 | |
483 | memfd = vm_create_guest_memfd(vm, MEM_REGION_SIZE, 0); |
484 | for (i = 1; i < PAGE_SIZE; i++) |
485 | test_invalid_guest_memfd(vm, memfd, i, "Unaligned offset should fail" ); |
486 | |
487 | vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD, |
488 | MEM_REGION_GPA, MEM_REGION_SIZE, 0, memfd, 0); |
489 | close(memfd); |
490 | |
491 | kvm_vm_free(vm); |
492 | } |
493 | |
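/*
 * Verify that a given guest_memfd range can be bound to at most one memslot,
 * i.e. that overlapping bindings are rejected with EEXIST.
 */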
static void test_add_overlapping_private_memory_regions(void)
{
	struct kvm_vm *vm;
	int memfd;
	int r;

	pr_info("Testing ADD of overlapping KVM_MEM_GUEST_MEMFD memory regions\n");

	vm = vm_create_barebones_protected_vm();

	memfd = vm_create_guest_memfd(vm, MEM_REGION_SIZE * 4, 0);

	vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
				   MEM_REGION_GPA, MEM_REGION_SIZE * 2, 0, memfd, 0);

	vm_set_user_memory_region2(vm, MEM_REGION_SLOT + 1, KVM_MEM_GUEST_MEMFD,
				   MEM_REGION_GPA * 2, MEM_REGION_SIZE * 2,
				   0, memfd, MEM_REGION_SIZE * 2);

	/*
	 * Delete the first memslot, and then attempt to recreate it except
	 * with a "bad" offset that results in overlap in the guest_memfd().
	 */
	vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
				   MEM_REGION_GPA, 0, NULL, -1, 0);

	/* Overlap the front half of the other slot. */
	r = __vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
					 MEM_REGION_GPA * 2 - MEM_REGION_SIZE,
					 MEM_REGION_SIZE * 2,
					 0, memfd, 0);
	TEST_ASSERT(r == -1 && errno == EEXIST, "%s",
		    "Overlapping guest_memfd() bindings should fail with EEXIST");

	/* And now the back half of the other slot. */
	r = __vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
					 MEM_REGION_GPA * 2 + MEM_REGION_SIZE,
					 MEM_REGION_SIZE * 2,
					 0, memfd, 0);
	TEST_ASSERT(r == -1 && errno == EEXIST, "%s",
		    "Overlapping guest_memfd() bindings should fail with EEXIST");

	close(memfd);
	kvm_vm_free(vm);
}
#endif

int main(int argc, char *argv[])
{
#ifdef __x86_64__
	int i, loops;

	/*
	 * FIXME: the zero-memslot test fails on aarch64 and s390x because
	 * KVM_RUN fails with ENOEXEC or EFAULT.
	 */
	test_zero_memory_regions();
#endif

	test_invalid_memory_region_flags();

	test_add_max_memory_regions();

#ifdef __x86_64__
	if (kvm_has_cap(KVM_CAP_GUEST_MEMFD) &&
	    (kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM))) {
		test_add_private_memory_region();
		test_add_overlapping_private_memory_regions();
	} else {
		pr_info("Skipping tests for KVM_MEM_GUEST_MEMFD memory regions\n");
	}

	if (argc > 1)
		loops = atoi_positive("Number of iterations", argv[1]);
	else
		loops = 10;

	pr_info("Testing MOVE of in-use region, %d loops\n", loops);
	for (i = 0; i < loops; i++)
		test_move_memory_region();

	pr_info("Testing DELETE of in-use region, %d loops\n", loops);
	for (i = 0; i < loops; i++)
		test_delete_memory_region();
#endif

	return 0;
}