// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 *
 * These tests are "kernel integrity" tests. They are looking for kernel
 * WARN/OOPS/KASAN/etc splats triggered by kernel sanitizers & debugging
 * features. They do not attempt to verify that the system calls are doing what
 * they are supposed to do.
 *
 * The basic philosophy is to run a sequence of calls that will succeed and then
 * sweep every failure injection point on that call chain to look for
 * interesting things in error handling.
 *
 * If something is actually going wrong, this test is best run with:
 *   echo 1 > /proc/sys/kernel/panic_on_warn
 */
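
/*
 * With panic_on_warn set, the first WARN splat panics the kernel, so a sweep
 * stops at the exact fail-nth iteration that produced it instead of the
 * warning scrolling by while the run continues.
 */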
#include <fcntl.h>
#include <dirent.h>

#define __EXPORTED_HEADERS__
#include <linux/vfio.h>

#include "iommufd_utils.h"

static bool have_fault_injection;

static int writeat(int dfd, const char *fn, const char *val)
{
	size_t val_len = strlen(val);
	ssize_t res;
	int fd;

	fd = openat(dfd, fn, O_WRONLY);
	if (fd == -1)
		return -1;
	res = write(fd, val, val_len);
	assert(res == val_len);
	close(fd);
	return 0;
}

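/*
 * Constructors run before main(), and therefore before the kselftest harness
 * runs any test, so the shared test globals (PAGE_SIZE, BUFFER_SIZE and
 * buffer, which come from iommufd_utils.h) are initialized for every test
 * below. Note the mmap() result is not checked here; if it fails, the map
 * calls in the tests will simply fail.
 */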
static __attribute__((constructor)) void setup_buffer(void)
{
	PAGE_SIZE = sysconf(_SC_PAGE_SIZE);

	BUFFER_SIZE = 2 * 1024 * 1024;

	buffer = mmap(0, BUFFER_SIZE, PROT_READ | PROT_WRITE,
		      MAP_SHARED | MAP_ANONYMOUS, -1, 0);
}

/*
 * This sets up fault injection in a way that is useful for this test.
 * It does not attempt to restore things back to how they were.
 */
static __attribute__((constructor)) void setup_fault_injection(void)
{
	DIR *debugfs = opendir("/sys/kernel/debug/");
	struct dirent *dent;

	if (!debugfs)
		return;

	/* Allow any allocation call to be fault injected */
	if (writeat(dirfd(debugfs), "failslab/ignore-gfp-wait", "N"))
		return;
	writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-wait", "N");
	writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-highmem", "N");

	while ((dent = readdir(debugfs))) {
		char fn[300];

		if (strncmp(dent->d_name, "fail", 4) != 0)
			continue;

		/* We are looking for kernel splats, quiet down the log */
		snprintf(fn, sizeof(fn), "%s/verbose", dent->d_name);
		writeat(dirfd(debugfs), fn, "0");
	}
	closedir(debugfs);
	have_fault_injection = true;
}
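
/*
 * The setup above is roughly equivalent to doing this by hand:
 *
 *   echo N > /sys/kernel/debug/failslab/ignore-gfp-wait
 *   echo N > /sys/kernel/debug/fail_page_alloc/ignore-gfp-wait
 *   echo N > /sys/kernel/debug/fail_page_alloc/ignore-gfp-highmem
 *   for d in /sys/kernel/debug/fail*; do echo 0 > $d/verbose; done
 *
 * Setting ignore-gfp-wait to N allows faults to be injected into allocations
 * that can sleep (the common GFP_KERNEL case), and verbose=0 suppresses the
 * per-injection log lines so genuine splats stand out.
 */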

struct fail_nth_state {
	int proc_fd;		/* this task's /proc/.../fail-nth file */
	unsigned int iteration;	/* next injection site to arm; 0 is the baseline run */
};

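/*
 * fail-nth is a per-task (per-thread) knob. The kselftest harness runs each
 * test in its own single-threaded forked child, so getpid() here is also the
 * thread id that the /proc/self/task/ path expects.
 */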
static void fail_nth_first(struct __test_metadata *_metadata,
			   struct fail_nth_state *nth_state)
{
	char buf[300];

	snprintf(buf, sizeof(buf), "/proc/self/task/%u/fail-nth", getpid());
	nth_state->proc_fd = open(buf, O_RDWR);
	ASSERT_NE(-1, nth_state->proc_fd);
}

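/*
 * Called after every run with that run's result. Per
 * Documentation/fault-injection/fault-injection.rst, reading fail-nth back
 * returns "0\n" once the armed Nth site was hit (the fault was injected), or a
 * positive value if the run completed without ever reaching site N. In the
 * first case arm the next site and run again (return true); in the second the
 * sweep is complete (return false). An EFAULT on the proc read/write means the
 * injection landed on the proc access itself, which is treated the same as
 * "the fault was not consumed by the test".
 */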
static bool fail_nth_next(struct __test_metadata *_metadata,
			  struct fail_nth_state *nth_state,
			  int test_result)
{
	static const char disable_nth[] = "0";
	char buf[300];

	/*
	 * This is just an arbitrary limit based on the current kernel
	 * situation. Changes in the kernel can dramatically change the number
	 * of required fault injection sites, so if this limit is hit it
	 * doesn't necessarily mean a test failure, just that the limit has to
	 * be made bigger.
	 */
	ASSERT_GT(400, nth_state->iteration);
	if (nth_state->iteration != 0) {
		ssize_t res;
		ssize_t res2;

		buf[0] = 0;
		/*
		 * Annoyingly, disabling the nth can also fail. This means
		 * the test passed without triggering the failure.
		 */
		res = pread(nth_state->proc_fd, buf, sizeof(buf), 0);
		if (res == -1 && errno == EFAULT) {
			buf[0] = '1';
			buf[1] = '\n';
			res = 2;
		}

		res2 = pwrite(nth_state->proc_fd, disable_nth,
			      ARRAY_SIZE(disable_nth) - 1, 0);
		if (res2 == -1 && errno == EFAULT) {
			res2 = pwrite(nth_state->proc_fd, disable_nth,
				      ARRAY_SIZE(disable_nth) - 1, 0);
			buf[0] = '1';
			buf[1] = '\n';
		}
		ASSERT_EQ(ARRAY_SIZE(disable_nth) - 1, res2);

		/* printf("  nth %u result=%d nth=%u\n", nth_state->iteration,
			  test_result, atoi(buf)); */
		fflush(stdout);
		ASSERT_LT(1, res);
		if (res != 2 || buf[0] != '0' || buf[1] != '\n')
			return false;
	} else {
		/* printf("  nth %u result=%d\n", nth_state->iteration,
			  test_result); */
	}
	nth_state->iteration++;
	return true;
}

/*
 * This is called during the test to start failure injection. It allows the
 * test to do some setup whose injection points have already been swept and
 * thus reduces the required iterations.
 */
void __fail_nth_enable(struct __test_metadata *_metadata,
		       struct fail_nth_state *nth_state)
{
	char buf[300];
	size_t len;

	if (!nth_state->iteration)
		return;

	len = snprintf(buf, sizeof(buf), "%u", nth_state->iteration);
	ASSERT_EQ(len, pwrite(nth_state->proc_fd, buf, len, 0));
}
#define fail_nth_enable() __fail_nth_enable(_metadata, _nth_state)

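/*
 * TEST_FAIL_NTH() generates a normal TEST_F() wrapper around the test body
 * below it. The wrapper first does a baseline run with no fault armed, which
 * must return 0. Then, while fail_nth_next() reports that the last armed site
 * was consumed, it tears down and re-creates the fixture and reruns the body
 * with the next site armed; those runs may legitimately return -1 when an
 * injected failure makes a call fail. The loop ends on a run that never hit
 * the armed site, and since nothing was injected into it, that final run must
 * also return 0.
 */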
#define TEST_FAIL_NTH(fixture_name, name)                                  \
	static int test_nth_##name(struct __test_metadata *_metadata,     \
				   FIXTURE_DATA(fixture_name) *self,       \
				   const FIXTURE_VARIANT(fixture_name)     \
					   *variant,                       \
				   struct fail_nth_state *_nth_state);     \
	TEST_F(fixture_name, name)                                         \
	{                                                                  \
		struct fail_nth_state nth_state = {};                      \
		int test_result = 0;                                       \
									   \
		if (!have_fault_injection)                                 \
			SKIP(return,                                       \
			     "fault injection is not enabled in the kernel"); \
		fail_nth_first(_metadata, &nth_state);                     \
		ASSERT_EQ(0, test_nth_##name(_metadata, self, variant,     \
					     &nth_state));                 \
		while (fail_nth_next(_metadata, &nth_state, test_result)) { \
			fixture_name##_teardown(_metadata, self, variant); \
			fixture_name##_setup(_metadata, self, variant);    \
			test_result = test_nth_##name(_metadata, self,     \
						      variant, &nth_state); \
		};                                                         \
		ASSERT_EQ(0, test_result);                                 \
	}                                                                  \
	static int test_nth_##name(                                        \
		struct __test_metadata __attribute__((unused)) *_metadata, \
		FIXTURE_DATA(fixture_name) __attribute__((unused)) *self,  \
		const FIXTURE_VARIANT(fixture_name) __attribute__((unused)) \
			*variant,                                          \
		struct fail_nth_state *_nth_state)

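/*
 * The fixture records the iommufd file descriptor and any created access
 * object so that teardown can release them no matter where an injected
 * failure aborted the test body.
 */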
FIXTURE(basic_fail_nth)
{
	int fd;
	uint32_t access_id;
};

FIXTURE_SETUP(basic_fail_nth)
{
	self->fd = -1;
	self->access_id = 0;
}

FIXTURE_TEARDOWN(basic_fail_nth)
{
	int rc;

	if (self->access_id) {
		/* The access FD holds the iommufd open until it closes */
		rc = _test_cmd_destroy_access(self->access_id);
		assert(rc == 0);
	}
	teardown_iommufd(self->fd, _metadata);
}

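/*
 * Convention for all the test bodies below: return -1 as soon as any call
 * fails (expected whenever the armed fault is injected into it) and 0 only on
 * a fully clean pass. Anything created along the way is either recorded in
 * the fixture or released when teardown closes the iommufd.
 */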
/* Cover ioas.c */
TEST_FAIL_NTH(basic_fail_nth, basic)
{
	struct iommu_iova_range ranges[10];
	uint32_t ioas_id;
	__u64 iova;

	fail_nth_enable();

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	{
		struct iommu_ioas_iova_ranges ranges_cmd = {
			.size = sizeof(ranges_cmd),
			.num_iovas = ARRAY_SIZE(ranges),
			.ioas_id = ioas_id,
			.allowed_iovas = (uintptr_t)ranges,
		};
		if (ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd))
			return -1;
	}

	{
		struct iommu_ioas_allow_iovas allow_cmd = {
			.size = sizeof(allow_cmd),
			.ioas_id = ioas_id,
			.num_iovas = 1,
			.allowed_iovas = (uintptr_t)ranges,
		};

		ranges[0].start = 16 * 1024;
		ranges[0].last = BUFFER_SIZE + 16 * 1024 * 600 - 1;
		if (ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd))
			return -1;
	}

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	{
		struct iommu_ioas_copy copy_cmd = {
			.size = sizeof(copy_cmd),
			.flags = IOMMU_IOAS_MAP_WRITEABLE |
				 IOMMU_IOAS_MAP_READABLE,
			.dst_ioas_id = ioas_id,
			.src_ioas_id = ioas_id,
			.src_iova = iova,
			.length = sizeof(ranges),
		};

		if (ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd))
			return -1;
	}

	if (_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE,
				   NULL))
		return -1;
	/* Failure path of no IOVA to unmap */
	_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE, NULL);
	return 0;
}

/* iopt_area_fill_domains() and iopt_area_fill_domain() */
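/*
 * The intent here: mapping while the first mock domain is attached drives
 * iopt_area_fill_domains() (filling the new area into every attached domain),
 * while re-attaching a mock domain after the map drives
 * iopt_area_fill_domain() (filling the existing area into one new domain).
 */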
TEST_FAIL_NTH(basic_fail_nth, map_domain)
{
	uint32_t ioas_id;
	__u32 stdev_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;
	return 0;
}

TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
{
	uint32_t ioas_id;
	__u32 stdev_id2;
	__u32 stdev_id;
	__u32 hwpt_id2;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2,
				  NULL))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;

	if (_test_ioctl_destroy(self->fd, stdev_id2))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;
	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2,
				  NULL))
		return -1;
	return 0;
}

TEST_FAIL_NTH(basic_fail_nth, access_rw)
{
	uint64_t tmp_big[4096];
	uint32_t ioas_id;
	uint16_t tmp[32];
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	fail_nth_enable();

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id, 0))
		return -1;

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_RW,
			.id = self->access_id,
			.access_rw = { .iova = iova,
				       .length = sizeof(tmp),
				       .uptr = (uintptr_t)tmp },
		};

		/* Plain read (no write flag set) */
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;

		access_cmd.access_rw.flags = MOCK_ACCESS_RW_WRITE;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;

		access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
		access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH |
					     MOCK_ACCESS_RW_WRITE;
		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
	}

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_RW,
			.id = self->access_id,
			.access_rw = { .iova = iova,
				       .flags = MOCK_ACCESS_RW_SLOW_PATH,
				       .length = sizeof(tmp_big),
				       .uptr = (uintptr_t)tmp_big },
		};

		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
	}
	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;
	return 0;
}

/* pages.c access functions */
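/*
 * MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES makes the mock access actually pin
 * its pages, so the ACCESS_PAGES command below exercises the pin/unpin paths
 * in pages.c rather than just the rw path used by the previous test.
 */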
TEST_FAIL_NTH(basic_fail_nth, access_pin)
{
	uint32_t access_pages_id;
	uint32_t ioas_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
				    MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
		return -1;

	fail_nth_enable();

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_PAGES,
			.id = self->access_id,
			.access_pages = { .iova = iova,
					  .length = BUFFER_SIZE,
					  .uptr = (uintptr_t)buffer },
		};

		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
	}

	if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
					   access_pages_id))
		return -1;

	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;
	return 0;
}

/* iopt_pages_fill_xarray() */
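/*
 * Same shape as access_pin, but a mock domain is attached before the access
 * pins its pages, so the pin has to populate the access xarray from pages
 * that are already mapped into a domain, which is the
 * iopt_pages_fill_xarray() path named above.
 */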
TEST_FAIL_NTH(basic_fail_nth, access_pin_domain)
{
	uint32_t access_pages_id;
	uint32_t ioas_id;
	__u32 stdev_id;
	__u32 hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
		return -1;

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id, NULL))
		return -1;

	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id,
				    MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES))
		return -1;

	fail_nth_enable();

	{
		struct iommu_test_cmd access_cmd = {
			.size = sizeof(access_cmd),
			.op = IOMMU_TEST_OP_ACCESS_PAGES,
			.id = self->access_id,
			.access_pages = { .iova = iova,
					  .length = BUFFER_SIZE,
					  .uptr = (uintptr_t)buffer },
		};

		if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			  &access_cmd))
			return -1;
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
	}

	if (_test_cmd_destroy_access_pages(self->fd, self->access_id,
					   access_pages_id))
		return -1;

	if (_test_cmd_destroy_access(self->access_id))
		return -1;
	self->access_id = 0;

	if (_test_ioctl_destroy(self->fd, stdev_id))
		return -1;
	return 0;
}

/* device.c */
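/*
 * Sweeps the device-facing paths: attaching a mock device,
 * IOMMU_GET_HW_INFO, manual HWPT allocation, and replacing the attached
 * IOAS/HWPT.
 */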
TEST_FAIL_NTH(basic_fail_nth, device)
{
	struct iommu_test_hw_info info;
	uint32_t ioas_id;
	uint32_t ioas_id2;
	uint32_t stdev_id;
	uint32_t idev_id;
	uint32_t hwpt_id;
	__u64 iova;

	self->fd = open("/dev/iommu", O_RDWR);
	if (self->fd == -1)
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id))
		return -1;

	if (_test_ioctl_ioas_alloc(self->fd, &ioas_id2))
		return -1;

	iova = MOCK_APERTURE_START;
	if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, PAGE_SIZE, &iova,
				 IOMMU_IOAS_MAP_FIXED_IOVA |
					 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;
	if (_test_ioctl_ioas_map(self->fd, ioas_id2, buffer, PAGE_SIZE, &iova,
				 IOMMU_IOAS_MAP_FIXED_IOVA |
					 IOMMU_IOAS_MAP_WRITEABLE |
					 IOMMU_IOAS_MAP_READABLE))
		return -1;

	fail_nth_enable();

	if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, NULL,
				  &idev_id))
		return -1;

	if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info), NULL))
		return -1;

	if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0, &hwpt_id,
				 IOMMU_HWPT_DATA_NONE, 0, 0))
		return -1;

	if (_test_cmd_mock_domain_replace(self->fd, stdev_id, ioas_id2, NULL))
		return -1;

	if (_test_cmd_mock_domain_replace(self->fd, stdev_id, hwpt_id, NULL))
		return -1;
	return 0;
}

TEST_HARNESS_MAIN