// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN runtime code that manages shadow memory for
 * generic and software tag-based KASAN modes.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"

bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return kasan_check_range((void *)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return kasan_check_range((void *)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);
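/*
 * Illustrative note (added, not from the original source): the
 * instrument_read()/instrument_write() wrappers in
 * include/linux/instrumented.h funnel explicit checks into these
 * exports, so a checked 8-byte read of 'addr' ends up as
 * __kasan_check_read(addr, 8). Both return false if a KASAN report
 * was printed for the access.
 */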

#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
/*
 * CONFIG_GENERIC_ENTRY relies on compiler emitted mem*() calls to not be
 * instrumented. KASAN enabled toolchains should emit __asan_mem*() functions
 * for the sites they want to instrument.
 *
 * If we have a compiler that can instrument memintrinsics, never override
 * these, so that non-instrumented files can safely consider them as builtins.
 */
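/*
 * Illustrative sketch (assumed toolchain behavior, added for clarity):
 * with CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX, a call such as
 *
 *	memset(p, 0, 32);
 *
 * in an instrumented file is emitted by the compiler as
 * __asan_memset(p, 0, 32), so the plain mem*() overrides below are only
 * needed for toolchains without that prefix support.
 */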
#undef memset
void *memset(void *addr, int c, size_t len)
{
	if (!kasan_check_range(addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}
#endif

void *__asan_memset(void *addr, int c, ssize_t len)
{
	if (!kasan_check_range(addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}
EXPORT_SYMBOL(__asan_memset);

#ifdef __HAVE_ARCH_MEMMOVE
void *__asan_memmove(void *dest, const void *src, ssize_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
EXPORT_SYMBOL(__asan_memmove);
#endif

void *__asan_memcpy(void *dest, const void *src, ssize_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}
EXPORT_SYMBOL(__asan_memcpy);

#ifdef CONFIG_KASAN_SW_TAGS
void *__hwasan_memset(void *addr, int c, ssize_t len) __alias(__asan_memset);
EXPORT_SYMBOL(__hwasan_memset);
#ifdef __HAVE_ARCH_MEMMOVE
void *__hwasan_memmove(void *dest, const void *src, ssize_t len) __alias(__asan_memmove);
EXPORT_SYMBOL(__hwasan_memmove);
#endif
void *__hwasan_memcpy(void *dest, const void *src, ssize_t len) __alias(__asan_memcpy);
EXPORT_SYMBOL(__hwasan_memcpy);
#endif

void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
	void *shadow_start, *shadow_end;

	if (!kasan_arch_is_ready())
		return;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_new_object) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	if (WARN_ON(size & KASAN_GRANULE_MASK))
		return;

	shadow_start = kasan_mem_to_shadow(addr);
	shadow_end = kasan_mem_to_shadow(addr + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}
EXPORT_SYMBOL_GPL(kasan_poison);
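/*
 * Worked example (added for illustration, assuming generic KASAN with
 * KASAN_GRANULE_SIZE == 8): poisoning a freed 64-byte object writes
 * 64 / 8 == 8 shadow bytes, each set to the supplied value:
 *
 *	kasan_poison(object, 64, KASAN_SLAB_FREE, false);
 */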

#ifdef CONFIG_KASAN_GENERIC
void kasan_poison_last_granule(const void *addr, size_t size)
{
	if (!kasan_arch_is_ready())
		return;

	if (size & KASAN_GRANULE_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
		*shadow = size & KASAN_GRANULE_MASK;
	}
}
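/*
 * Example of the encoding above (assuming KASAN_GRANULE_SIZE == 8): for
 * an object of size 21, granules 0 and 1 are fully accessible, and the
 * shadow byte of the last granule is set to 21 & 7 == 5, i.e. only the
 * first 5 bytes of that granule are valid.
 */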
#endif

void kasan_unpoison(const void *addr, size_t size, bool init)
{
	u8 tag = get_tag(addr);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_new_object) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;

	/* Unpoison all granules that cover the object. */
	kasan_poison(addr, round_up(size, KASAN_GRANULE_SIZE), tag, false);

	/* Partially poison the last granule for the generic mode. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(addr, size);
}
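/*
 * Note (added for illustration): under CONFIG_KASAN_SW_TAGS the value
 * written to shadow is the tag taken from the pointer, e.g. unpoisoning
 * through a pointer tagged 0xAB fills the object's shadow with 0xAB, so
 * only accesses through matching-tagged pointers pass the check.
 */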

#ifdef CONFIG_MEMORY_HOTPLUG
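/*
 * Descriptive note (added): walk init_mm's page tables to see whether a
 * shadow page is already mapped at @addr. Leaf entries at the PUD or PMD
 * level count as mapped, since the boot-time shadow may use huge pages.
 */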
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;
	if (pud_leaf(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;
	if (pmd_leaf(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(ptep_get(pte));
}

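/*
 * Sketch of the arithmetic below (an illustrative assumption: a 128 MiB
 * hotplug block and KASAN_SHADOW_SCALE_SHIFT == 3): nr_pages == 32768,
 * so nr_shadow_pages == 32768 >> 3 == 4096 and shadow_size == 16 MiB,
 * i.e. one shadow byte per 8 bytes of onlined memory.
 */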
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
		WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, it must have been mapped
		 * during boot. This can happen when onlining previously
		 * offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free shadow.
		 * Non-NULL result of the find_vm_area() will tell us if
		 * that was the second case.
		 *
		 * Currently it's not possible to free shadow mapped during
		 * boot by kasan_init(): the code to do that hasn't been
		 * written yet, so we simply leak that memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, DEFAULT_CALLBACK_PRI);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC

void __init __weak kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{
}

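/*
 * Descriptive note (added): install one shadow page for a vmalloc'ed
 * region. The backing page is allocated outside init_mm.page_table_lock,
 * and the pte_none() check is repeated under the lock; if another CPU
 * won the race, the freshly allocated page is freed again.
 */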
static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
				      void *unused)
{
	unsigned long page;
	pte_t pte;

	if (likely(!pte_none(ptep_get(ptep))))
		return 0;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	__memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(ptep_get(ptep)))) {
		set_pte_at(&init_mm, addr, ptep, pte);
		page = 0;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (page)
		free_page(page);
	return 0;
}

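/*
 * Illustrative call path (a simplified assumption, not from the original
 * source): the vmalloc code reserves [addr, addr + size) and then calls
 * kasan_populate_vmalloc(addr, size) so that the shadow is backed before
 * the region is handed out; __kasan_unpoison_vmalloc() later marks the
 * mapped object itself as valid.
 */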
int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
	unsigned long shadow_start, shadow_end;
	int ret;

	if (!kasan_arch_is_ready())
		return 0;

	if (!is_vmalloc_or_module_addr((void *)addr))
		return 0;

	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);

	/*
	 * User Mode Linux maps enough shadow memory for all of virtual memory
	 * at boot, so it doesn't need to allocate more here; the existing
	 * shadow just needs to be cleared.
	 *
	 * The remaining CONFIG_UML checks in this file exist for the same
	 * reason.
	 */
	if (IS_ENABLED(CONFIG_UML)) {
		__memset((void *)shadow_start, KASAN_VMALLOC_INVALID, shadow_end - shadow_start);
		return 0;
	}

	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
	shadow_end = PAGE_ALIGN(shadow_end);

	ret = apply_to_page_range(&init_mm, shadow_start,
				  shadow_end - shadow_start,
				  kasan_populate_vmalloc_pte, NULL);
	if (ret)
		return ret;

	flush_cache_vmap(shadow_start, shadow_end);

	/*
	 * We need to be careful about inter-cpu effects here. Consider:
	 *
	 *   CPU#0				CPU#1
	 * WRITE_ONCE(p, vmalloc(100));		while (x = READ_ONCE(p)) ;
	 *					p[99] = 1;
	 *
	 * With compiler instrumentation, that ends up looking like this:
	 *
	 *   CPU#0				CPU#1
	 * // vmalloc() allocates memory
	 * // let a = area->addr
	 * // we reach kasan_populate_vmalloc
	 * // and call kasan_unpoison:
	 * STORE shadow(a), unpoison_val
	 * ...
	 * STORE shadow(a+99), unpoison_val	x = LOAD p
	 * // rest of vmalloc process		<data dependency>
	 * STORE p, a				LOAD shadow(x+99)
	 *
	 * If there is no barrier between the end of unpoisoning the shadow
	 * and the store of the result to p, the stores could be committed
	 * in a different order by CPU#0, and CPU#1 could erroneously observe
	 * poison in the shadow.
	 *
	 * We need some sort of barrier between the stores.
	 *
	 * In the vmalloc() case, this is provided by a smp_wmb() in
	 * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
	 * get_vm_area() and friends, the caller gets shadow allocated but
	 * doesn't have any pages mapped into the virtual address space that
	 * has been reserved. Mapping those pages in will involve taking and
	 * releasing a page-table lock, which will provide the barrier.
	 */

	return 0;
}

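/*
 * Counterpart of kasan_populate_vmalloc_pte() (comment added): clear the
 * shadow pte under init_mm.page_table_lock and free the backing page.
 * The pte_none() re-check under the lock keeps this safe against a
 * concurrent depopulation of the same pte.
 */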
static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
					void *unused)
{
	unsigned long page;

	page = (unsigned long)__va(pte_pfn(ptep_get(ptep)) << PAGE_SHIFT);

	spin_lock(&init_mm.page_table_lock);

	if (likely(!pte_none(ptep_get(ptep)))) {
		pte_clear(&init_mm, addr, ptep);
		free_page(page);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
 * infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labeled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                     end
 *                    v                         v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- -------- -------- --------
 *      |        |        |        |        |
 *      |        |        |   /-------/     |
 *      \-------\|/------/   |/---------------/
 *             |||           ||
 *         |??AAAAAA|AAAAAAAA|AA??????|                   < shadow
 *             (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 *  free_region_start                                  free_region_end
 *      |             start                     end         |
 *      v             v                         v           v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- -------- -------- --------
 *      |        |        |        |        |
 *      |        |        |   /-------/     |
 *      \-------\|/------/   |/---------------/
 *             |||           ||
 *         |FFAAAAAA|AAAAAAAA|AAF?????|                   < shadow
 *             (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * Concurrency
 * -----------
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code does not.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
	unsigned long size;

	if (!kasan_arch_is_ready())
		return;

	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

	free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (start != region_start &&
	    free_region_start < region_start)
		region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;

	free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (end != region_end &&
	    free_region_end > region_end)
		region_end += KASAN_MEMORY_PER_SHADOW_PAGE;

	shadow_start = kasan_mem_to_shadow((void *)region_start);
	shadow_end = kasan_mem_to_shadow((void *)region_end);

	if (shadow_end > shadow_start) {
		size = shadow_end - shadow_start;
		if (IS_ENABLED(CONFIG_UML)) {
			__memset(shadow_start, KASAN_SHADOW_INIT, shadow_end - shadow_start);
			return;
		}
		apply_to_existing_page_range(&init_mm,
					     (unsigned long)shadow_start,
					     size, kasan_depopulate_vmalloc_pte,
					     NULL);
		flush_tlb_kernel_range((unsigned long)shadow_start,
				       (unsigned long)shadow_end);
	}
}

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags)
{
	/*
	 * Software KASAN modes unpoison both VM_ALLOC and non-VM_ALLOC
	 * mappings, so the KASAN_VMALLOC_VM_ALLOC flag is ignored.
	 * Software KASAN modes can't optimize zeroing memory by combining it
	 * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
	 */

	if (!kasan_arch_is_ready())
		return (void *)start;

	if (!is_vmalloc_or_module_addr(start))
		return (void *)start;

	/*
	 * Don't tag executable memory with the tag-based mode.
	 * The kernel doesn't tolerate having the PC register tagged.
	 */
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
	    !(flags & KASAN_VMALLOC_PROT_NORMAL))
		return (void *)start;

	start = set_tag(start, kasan_random_tag());
	kasan_unpoison(start, size, false);
	return (void *)start;
}
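/*
 * Usage sketch (added for illustration; the call site is hypothetical):
 * a normal-memory mapping is returned retagged, e.g.
 *
 *	p = __kasan_unpoison_vmalloc(area->addr, size,
 *				     KASAN_VMALLOC_PROT_NORMAL);
 *
 * while an executable mapping under the software tag-based mode is
 * returned with its tag unchanged, since the PC must stay untagged.
 */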

/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
	if (!kasan_arch_is_ready())
		return;

	if (!is_vmalloc_or_module_addr(start))
		return;

	size = round_up(size, KASAN_GRANULE_SIZE);
	kasan_poison(start, size, KASAN_VMALLOC_INVALID, false);
}

#else /* CONFIG_KASAN_VMALLOC */

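/*
 * Worked example of the sizing below (an illustrative assumption, with
 * KASAN_GRANULE_SIZE == 8 and KASAN_SHADOW_SCALE_SHIFT == 3): for a
 * 1 MiB module, scaled_size == (1048576 + 7) >> 3 == 131072, which
 * rounds up to 128 KiB of shadow, i.e. one byte per 8-byte granule.
 */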
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
				KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	if (IS_ENABLED(CONFIG_UML)) {
		__memset((void *)shadow_start, KASAN_SHADOW_INIT, shadow_size);
		return 0;
	}

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		struct vm_struct *vm = find_vm_area(addr);
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		vm->flags |= VM_KASAN;
		kmemleak_ignore(ret);

		if (vm->flags & VM_DEFER_KMEMLEAK)
			kmemleak_vmalloc(vm, size, gfp_mask);

		return 0;
	}

	return -ENOMEM;
}

void kasan_free_module_shadow(const struct vm_struct *vm)
{
	if (IS_ENABLED(CONFIG_UML))
		return;

	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

#endif