// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#define pr_fmt(fmt) "kasan: test: " fmt

#include <kunit/test.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <trace/events/printk.h>

#include <asm/page.h>

#include "kasan.h"

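/*
 * Offset used for out-of-bounds accesses in the tag-based modes: unlike
 * Generic KASAN, which detects bad accesses with byte precision, the
 * tag-based modes only have KASAN_GRANULE_SIZE granularity, so some tests
 * below nudge their accesses one granule further out.
 */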
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

static bool multishot;

/* Fields set based on lines observed in the console. */
static struct {
	bool report_found;
	bool async_fault;
} test_status;

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

/* Probe for console output: obtains test_status lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	if (strnstr(buf, "BUG: KASAN: ", len))
		WRITE_ONCE(test_status.report_found, true);
	else if (strnstr(buf, "Asynchronous fault: ", len))
		WRITE_ONCE(test_status.async_fault, true);
}

static int kasan_suite_init(struct kunit_suite *suite)
{
	if (!kasan_enabled()) {
		pr_err("Can't run KASAN tests with KASAN disabled");
		return -1;
	}

	/* Stop failing KUnit tests on KASAN reports. */
	kasan_kunit_test_suite_start();

	/*
	 * Temporarily enable multi-shot mode. Otherwise, KASAN would only
	 * report the first detected bug and panic the kernel if panic_on_warn
	 * is enabled.
	 */
	multishot = kasan_save_enable_multi_shot();

	register_trace_console(probe_console, NULL);
	return 0;
}

static void kasan_suite_exit(struct kunit_suite *suite)
{
	kasan_kunit_test_suite_end();
	kasan_restore_multi_shot(multishot);
	unregister_trace_console(probe_console, NULL);
	tracepoint_synchronize_unregister();
}

static void kasan_test_exit(struct kunit *test)
{
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));
}

/**
 * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a
 * KASAN report; causes a KUnit test failure otherwise.
 *
 * @test: Currently executing KUnit test.
 * @expression: Expression that must produce a KASAN report.
 *
 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can only be disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the test_status
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
 * as false. This allows detecting KASAN reports that happen outside of the
 * checks by asserting !test_status.report_found at the start of
 * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
	    kasan_sync_fault_possible()) \
		migrate_disable(); \
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found)); \
	barrier(); \
	expression; \
	barrier(); \
	if (kasan_async_fault_possible()) \
		kasan_force_async_fault(); \
	if (!READ_ONCE(test_status.report_found)) { \
		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure " \
				"expected in \"" #expression \
				"\", but none occurred"); \
	} \
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \
	    kasan_sync_fault_possible()) { \
		if (READ_ONCE(test_status.report_found) && \
		    !READ_ONCE(test_status.async_fault)) \
			kasan_enable_hw_tags(); \
		migrate_enable(); \
	} \
	WRITE_ONCE(test_status.report_found, false); \
	WRITE_ONCE(test_status.async_fault, false); \
} while (0)
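
/*
 * Typical usage, as seen throughout the tests below: wrap the single faulting
 * access in the macro, e.g.:
 *
 *	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
 */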

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
	if (!IS_ENABLED(config)) \
		kunit_skip((test), "Test requires " #config "=y"); \
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do { \
	if (IS_ENABLED(config)) \
		kunit_skip((test), "Test requires " #config "=n"); \
} while (0)

#define KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test) do { \
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) \
		break; /* No compiler instrumentation. */ \
	if (IS_ENABLED(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX)) \
		break; /* Should always be instrumented! */ \
	if (IS_ENABLED(CONFIG_GENERIC_ENTRY)) \
		kunit_skip((test), "Test requires checked mem*()"); \
} while (0)

static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	/*
	 * An unaligned access past the requested kmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

	/*
	 * An aligned access into the first out-of-bounds granule that falls
	 * within the aligned kmalloc object.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');

	/* Out-of-bounds access past the aligned kmalloc object. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
		ptr[size + KASAN_GRANULE_SIZE + 5]);

	kfree(ptr);
}

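/* Check that KASAN detects an out-of-bounds read to the left of an object. */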
static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

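/* Check out-of-bounds detection for an object allocated via kmalloc_node(). */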
static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	kfree(ptr);
}

/*
 * Check that KASAN detects an out-of-bounds access for a big object allocated
 * via kmalloc(), but not big enough to trigger the page_alloc fallback.
 */
static void kmalloc_big_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

/*
 * The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk
 * that does not fit into the largest slab cache and therefore is allocated via
 * the page_alloc fallback.
 */

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}

static void kmalloc_large_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_large_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void page_alloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	free_pages((unsigned long)ptr, order);
}

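/* Check that KASAN detects a use-after-free of a page_alloc allocation. */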
static void page_alloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void krealloc_more_oob_helper(struct kunit *test,
				     size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Suppress -Warray-bounds warnings. */
	OPTIMIZER_HIDE_VAR(ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}

static void krealloc_less_oob_helper(struct kunit *test,
				     size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Suppress -Warray-bounds warnings. */
	OPTIMIZER_HIDE_VAR(ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes, size2, middle, and size1 should land in separate
	 * granules and thus the latter two offsets should be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
			round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
			round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}

static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_large_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
				 KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_large_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
				 KMALLOC_MAX_CACHE_SIZE + 201);
}

/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	KUNIT_ASSERT_NULL(test, ptr2);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}

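/*
 * Check that KASAN detects an out-of-bounds access performed via a 16-byte
 * (two-word structure) copy.
 */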
static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/* RELOC_HIDE to prevent gcc from warning about short alloc */
	ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	OPTIMIZER_HIDE_VAR(ptr1);
	OPTIMIZER_HIDE_VAR(ptr2);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}

static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}

/*
 * Note: in the memset tests below, the written range touches both valid and
 * invalid memory. This makes sure that the instrumentation does not only check
 * the starting address but the whole range.
 */

static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	size_t memset_size = 2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	OPTIMIZER_HIDE_VAR(memset_size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, memset_size));
	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	size_t memset_size = 4;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	OPTIMIZER_HIDE_VAR(memset_size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, memset_size));
	kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	size_t memset_size = 8;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	OPTIMIZER_HIDE_VAR(memset_size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, memset_size));
	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;
	size_t memset_size = 16;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	OPTIMIZER_HIDE_VAR(memset_size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, memset_size));
	kfree(ptr);
}

static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
	kfree(ptr);
}

static void kmalloc_memmove_negative_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = -2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	/*
	 * Hardware tag-based mode doesn't check memmove for negative size.
	 * As a result, this test introduces a side-effect memory corruption,
	 * which can result in a crash.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = size;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

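/* Check that KASAN detects a basic use-after-free read of a kmalloc object. */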
static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
}

static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	/*
	 * Only generic KASAN uses quarantine, which is required to avoid a
	 * kernel memory corruption this test causes.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

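/*
 * Check that a use-after-free is still detected after the freed slot has been
 * handed out again by the allocator.
 */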
static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}

/*
 * Check that KASAN detects use-after-free when another object was allocated in
 * the same slot. Relevant for the tag-based modes, which do not use quarantine.
 */
static void kmalloc_uaf3(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 100;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
}

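/*
 * Exercise the atomic primitives on a bad address: @unsafe points into a
 * redzone, @safe at a valid object. Each operation on @unsafe must produce a
 * KASAN report.
 */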
static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
{
	int *i_unsafe = unsafe;

	KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*i_unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, WRITE_ONCE(*i_unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, smp_load_acquire(i_unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, smp_store_release(i_unsafe, 42));

	KUNIT_EXPECT_KASAN_FAIL(test, atomic_read(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_set(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_and(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_andnot(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_or(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_xor(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_xchg(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_cmpxchg(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(unsafe, safe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub_and_test(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_negative(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));

	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_read(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_set(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_and(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_andnot(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_or(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xor(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xchg(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_cmpxchg(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(unsafe, safe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub_and_test(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_negative(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
}

static void kasan_atomics(struct kunit *test)
{
	void *a1, *a2;

	/*
	 * Just as with kasan_bitops_tags(), we allocate 48 bytes of memory such
	 * that the following 16 bytes will make up the redzone.
	 */
	a1 = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);
	a2 = kzalloc(sizeof(atomic_long_t), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a2);

	/* Use atomics to access the redzone. */
	kasan_atomics_helper(test, a1 + 48, a2);

	kfree(a1);
	kfree(a2);
}

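/* Check that KASAN detects a double-free via kfree_sensitive(). */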
static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

/* Check that ksize() does NOT unpoison the whole object. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
	size_t real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	real_size = ksize(ptr);
	KUNIT_EXPECT_GT(test, real_size, size);

	OPTIMIZER_HIDE_VAR(ptr);

	/* These accesses shouldn't trigger a KASAN report. */
	ptr[0] = 'x';
	ptr[size - 1] = 'x';

	/* These must trigger a KASAN report. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);

	kfree(ptr);
}

/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
}

/*
 * The two tests below check that Generic KASAN prints auxiliary stack traces
 * for RCU callbacks and workqueues. The reports need to be inspected manually.
 *
 * These tests are still enabled for other KASAN modes to make sure that all
 * modes report bad accesses in tested scenarios.
 */

static struct kasan_rcu_info {
	int i;
	struct rcu_head rcu;
} *global_rcu_ptr;

static void rcu_uaf_reclaim(struct rcu_head *rp)
{
	struct kasan_rcu_info *fp =
		container_of(rp, struct kasan_rcu_info, rcu);

	kfree(fp);
	((volatile struct kasan_rcu_info *)fp)->i;
}

static void rcu_uaf(struct kunit *test)
{
	struct kasan_rcu_info *ptr;

	ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	global_rcu_ptr = rcu_dereference_protected(
				(struct kasan_rcu_info __rcu *)ptr, NULL);

	KUNIT_EXPECT_KASAN_FAIL(test,
		call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
		rcu_barrier());
}

static void workqueue_uaf_work(struct work_struct *work)
{
	kfree(work);
}

static void workqueue_uaf(struct kunit *test)
{
	struct workqueue_struct *workqueue;
	struct work_struct *work;

	workqueue = create_workqueue("kasan_workqueue_test");
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);

	work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);

	INIT_WORK(work, workqueue_uaf_work);
	queue_work(workqueue, work);
	destroy_workqueue(workqueue);

	KUNIT_EXPECT_KASAN_FAIL(test,
		((volatile struct work_struct *)work)->data);
}

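/*
 * The two tests below free an object via a pointer recovered through
 * page/phys address translation; this shouldn't trigger a KASAN report.
 */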
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

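/* Check out-of-bounds detection for objects from a custom kmem_cache. */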
static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static void empty_cache_ctor(void *object) { }

static void kmem_cache_double_destroy(struct kunit *test)
{
	struct kmem_cache *cache;

	/* Provide a constructor to prevent cache merging. */
	cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
	kmem_cache_destroy(cache);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
}

static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}

static void *mempool_prepare_kmalloc(struct kunit *test, mempool_t *pool, size_t size)
{
	int pool_size = 4;
	int ret;
	void *elem;

	memset(pool, 0, sizeof(*pool));
	ret = mempool_init_kmalloc_pool(pool, pool_size, size);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/*
	 * Allocate one element to prevent mempool from freeing elements to the
	 * underlying allocator and instead make it add them to the element
	 * list when the tests trigger double-free and invalid-free bugs.
	 * This allows testing KASAN annotations in add_element().
	 */
	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	return elem;
}

static struct kmem_cache *mempool_prepare_slab(struct kunit *test, mempool_t *pool, size_t size)
{
	struct kmem_cache *cache;
	int pool_size = 4;
	int ret;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	memset(pool, 0, sizeof(*pool));
	ret = mempool_init_slab_pool(pool, pool_size, cache);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/*
	 * Do not allocate one preallocated element, as we skip the double-free
	 * and invalid-free tests for slab mempool for simplicity.
	 */

	return cache;
}

static void *mempool_prepare_page(struct kunit *test, mempool_t *pool, int order)
{
	int pool_size = 4;
	int ret;
	void *elem;

	memset(pool, 0, sizeof(*pool));
	ret = mempool_init_page_pool(pool, pool_size, order);
	KUNIT_ASSERT_EQ(test, ret, 0);

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	return elem;
}

static void mempool_oob_right_helper(struct kunit *test, mempool_t *pool, size_t size)
{
	char *elem;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	OPTIMIZER_HIDE_VAR(elem);

	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test,
			((volatile char *)&elem[size])[0]);
	else
		KUNIT_EXPECT_KASAN_FAIL(test,
			((volatile char *)&elem[round_up(size, KASAN_GRANULE_SIZE)])[0]);

	mempool_free(elem, pool);
}

static void mempool_kmalloc_oob_right(struct kunit *test)
{
	mempool_t pool;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_oob_right_helper(test, &pool, size);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

static void mempool_kmalloc_large_oob_right(struct kunit *test)
{
	mempool_t pool;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_oob_right_helper(test, &pool, size);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

static void mempool_slab_oob_right(struct kunit *test)
{
	mempool_t pool;
	size_t size = 123;
	struct kmem_cache *cache;

	cache = mempool_prepare_slab(test, &pool, size);

	mempool_oob_right_helper(test, &pool, size);

	mempool_exit(&pool);
	kmem_cache_destroy(cache);
}

/*
 * Skip the out-of-bounds test for page mempool. With Generic KASAN, page
 * allocations have no redzones, and thus the out-of-bounds detection is not
 * guaranteed; see https://bugzilla.kernel.org/show_bug.cgi?id=210503. With
 * the tag-based KASAN modes, the neighboring allocation might have the same
 * tag; see https://bugzilla.kernel.org/show_bug.cgi?id=203505.
 */

static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page)
{
	char *elem, *ptr;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	mempool_free(elem, pool);

	ptr = page ? page_address((struct page *)elem) : elem;
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void mempool_kmalloc_uaf(struct kunit *test)
{
	mempool_t pool;
	size_t size = 128;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_uaf_helper(test, &pool, false);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

static void mempool_kmalloc_large_uaf(struct kunit *test)
{
	mempool_t pool;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
	void *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_uaf_helper(test, &pool, false);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

static void mempool_slab_uaf(struct kunit *test)
{
	mempool_t pool;
	size_t size = 123;
	struct kmem_cache *cache;

	cache = mempool_prepare_slab(test, &pool, size);

	mempool_uaf_helper(test, &pool, false);

	mempool_exit(&pool);
	kmem_cache_destroy(cache);
}

static void mempool_page_alloc_uaf(struct kunit *test)
{
	mempool_t pool;
	int order = 2;
	void *extra_elem;

	extra_elem = mempool_prepare_page(test, &pool, order);

	mempool_uaf_helper(test, &pool, true);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

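/* Free the same mempool element twice; the second free must be reported. */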
static void mempool_double_free_helper(struct kunit *test, mempool_t *pool)
{
	char *elem;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	mempool_free(elem, pool);

	KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem, pool));
}

static void mempool_kmalloc_double_free(struct kunit *test)
{
	mempool_t pool;
	size_t size = 128;
	char *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_double_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

static void mempool_kmalloc_large_double_free(struct kunit *test)
{
	mempool_t pool;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
	char *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_double_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

static void mempool_page_alloc_double_free(struct kunit *test)
{
	mempool_t pool;
	int order = 2;
	char *extra_elem;

	extra_elem = mempool_prepare_page(test, &pool, order);

	mempool_double_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

static void mempool_kmalloc_invalid_free_helper(struct kunit *test, mempool_t *pool)
{
	char *elem;

	elem = mempool_alloc_preallocated(pool);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);

	KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem + 1, pool));

	mempool_free(elem, pool);
}

static void mempool_kmalloc_invalid_free(struct kunit *test)
{
	mempool_t pool;
	size_t size = 128;
	char *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_kmalloc_invalid_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

static void mempool_kmalloc_large_invalid_free(struct kunit *test)
{
	mempool_t pool;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
	char *extra_elem;

	extra_elem = mempool_prepare_kmalloc(test, &pool, size);

	mempool_kmalloc_invalid_free_helper(test, &pool);

	mempool_free(extra_elem, &pool);
	mempool_exit(&pool);
}

/*
 * Skip the invalid-free test for page mempool. The invalid-free detection only
 * works for compound pages and mempool preallocates all page elements without
 * the __GFP_COMP flag.
 */

static char global_array[10];

static void kasan_global_oob_right(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_global_oob_left(struct kunit *test)
{
	char *volatile array = global_array;
	char *p = array - 3;

	/*
	 * GCC is known to fail this test, skip it.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size + 1));
	kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that, we need ptr to point to a zeroed byte. Skip the metadata
	 * that could be stored in a freed object, so ptr will likely point
	 * to a zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}

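/*
 * The two helpers below operate on an out-of-bounds bit; every bitop they
 * perform must produce a KASAN report.
 */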
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
	if (nr < 7)
		KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				xor_unlock_is_negative_byte(1 << nr, addr));
}

static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * The calls below try to access a bit within the allocated memory;
	 * however, they are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/* The calls below try to access a bit beyond the allocated memory. */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}

static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}

static void vmalloc_helpers_tags(struct kunit *test)
{
	void *ptr;

	/* This test is intended for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	if (!kasan_vmalloc_enabled())
		kunit_skip(test, "Test requires kasan.vmalloc=on");

	ptr = vmalloc(PAGE_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Check that the returned pointer is tagged. */
	KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure exported vmalloc helpers handle tagged pointers. */
	KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));

#if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
	{
		int rv;

		/* Make sure vmalloc'ed memory permissions can be changed. */
		rv = set_memory_ro((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
		rv = set_memory_rw((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
	}
#endif

	vfree(ptr);
}

static void vmalloc_oob(struct kunit *test)
{
	char *v_ptr, *p_ptr;
	struct page *page;
	size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	if (!kasan_vmalloc_enabled())
		kunit_skip(test, "Test requires kasan.vmalloc=on");

	v_ptr = vmalloc(size);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	OPTIMIZER_HIDE_VAR(v_ptr);

	/*
	 * We have to be careful not to hit the guard page in vmalloc tests.
	 * The MMU will catch that and crash us.
	 */

	/* Make sure in-bounds accesses are valid. */
	v_ptr[0] = 0;
	v_ptr[size - 1] = 0;

	/*
	 * An unaligned access past the requested vmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);

	/* An aligned access into the first out-of-bounds granule. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);

	/* Check that in-bounds accesses to the physical page are valid. */
	page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
	p_ptr[0] = 0;

	vfree(v_ptr);

	/*
	 * We can't check for use-after-unmap bugs in this nor in the following
	 * vmalloc tests, as the page might be fully unmapped and accessing it
	 * will crash the kernel.
	 */
}

static void vmap_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *p_page, *v_page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vmap mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	if (!kasan_vmalloc_enabled())
		kunit_skip(test, "Test requires kasan.vmalloc=on");

	p_page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
	p_ptr = page_address(p_page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	/*
	 * We can't check for out-of-bounds bugs in this nor in the following
	 * vmalloc tests, as allocations have page granularity and accessing
	 * the guard page will crash the kernel.
	 */

	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	/* Make sure vmalloc_to_page() correctly recovers the page pointer. */
	v_page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
	KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);

	vunmap(v_ptr);
	free_pages((unsigned long)p_ptr, 1);
}

static void vm_map_ram_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vm_map_ram mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vm_map_ram(&page, 1, -1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	vm_unmap_ram(v_ptr, 1);
	free_pages((unsigned long)p_ptr, 1);
}

static void vmalloc_percpu(struct kunit *test)
{
	char __percpu *ptr;
	int cpu;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons percpu mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);

	for_each_possible_cpu(cpu) {
		char *c_ptr = per_cpu_ptr(ptr, cpu);

		KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL);

		/* Make sure that in-bounds accesses don't crash the kernel. */
		*c_ptr = 0;
	}

	free_percpu(ptr);
}

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = get_random_u32_inclusive(1, 1024);
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = get_random_u32_inclusive(1, 4);
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}

	if (!kasan_vmalloc_enabled())
		return;

	for (i = 0; i < 256; i++) {
		size = get_random_u32_inclusive(1, 1024);
		ptr = vmalloc(size);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		vfree(ptr);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Back up the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff. */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		/*
		 * For Software Tag-Based KASAN, skip the majority of tag
		 * values to avoid the test printing too many reports.
		 */
		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
		    tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8)
			continue;

		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_big_oob_right),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(kmalloc_large_uaf),
	KUNIT_CASE(kmalloc_large_invalid_free),
	KUNIT_CASE(page_alloc_oob_right),
	KUNIT_CASE(page_alloc_uaf),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_large_more_oob),
	KUNIT_CASE(krealloc_large_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_negative_size),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kmalloc_uaf3),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(rcu_uaf),
	KUNIT_CASE(workqueue_uaf),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kmem_cache_double_destroy),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(mempool_kmalloc_oob_right),
	KUNIT_CASE(mempool_kmalloc_large_oob_right),
	KUNIT_CASE(mempool_slab_oob_right),
	KUNIT_CASE(mempool_kmalloc_uaf),
	KUNIT_CASE(mempool_kmalloc_large_uaf),
	KUNIT_CASE(mempool_slab_uaf),
	KUNIT_CASE(mempool_page_alloc_uaf),
	KUNIT_CASE(mempool_kmalloc_double_free),
	KUNIT_CASE(mempool_kmalloc_large_double_free),
	KUNIT_CASE(mempool_page_alloc_double_free),
	KUNIT_CASE(mempool_kmalloc_invalid_free),
	KUNIT_CASE(mempool_kmalloc_large_invalid_free),
	KUNIT_CASE(kasan_global_oob_right),
	KUNIT_CASE(kasan_global_oob_left),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kasan_atomics),
	KUNIT_CASE(vmalloc_helpers_tags),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(vmap_tags),
	KUNIT_CASE(vm_map_ram_tags),
	KUNIT_CASE(vmalloc_percpu),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};

static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
	.suite_init = kasan_suite_init,
	.suite_exit = kasan_suite_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");
