/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

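/*
 * Hints passed to kasan_unpoison_vmalloc(). Roughly: INIT requests that
 * the memory be initialized, VM_ALLOC marks a region allocated through
 * vmalloc() itself (as opposed to vmap() of caller-owned pages), and
 * PROT_NORMAL marks mappings with regular PAGE_KERNEL protection.
 */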
#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
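/* Generic mode: a shadow byte of 0 means the whole granule is accessible. */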
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

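/*
 * Translate a memory address to its shadow address. Illustrative numbers,
 * assuming KASAN_SHADOW_SCALE_SHIFT == 3 (the generic mode): each shadow
 * byte tracks an 8-byte granule, so the shadow of an 8-page region is a
 * single page starting at kasan_mem_to_shadow(addr).
 */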
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

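/*
 * Typical (illustrative) pattern around an access that is expected to
 * trip KASAN, e.g. when dumping arbitrary memory for debugging:
 *
 *	kasan_disable_current();
 *	do_unchecked_access();
 *	kasan_enable_current();
 */
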
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

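/*
 * Only the hardware tag-based mode both tags and initializes memory in a
 * single operation, letting the allocators skip a separate memset().
 */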
static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN

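/*
 * Per-cache KASAN state: the byte offsets at which the allocation and
 * free metadata are stored within each object, and whether the cache
 * backs kmalloc() allocations.
 */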
struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
	bool is_kmalloc;
};

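/*
 * The kasan_*() wrappers below guard each __kasan_*() implementation with
 * kasan_enabled(): a static key for the hardware tag-based mode (which can
 * be disabled at boot) and a compile-time constant for the other modes.
 */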
slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
	if (kasan_enabled())
		return __kasan_never_merge();
	return 0;
}

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_unpoison_pages(page, order, init);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
				unsigned int *size, slab_flags_t *flags)
{
	if (kasan_enabled())
		__kasan_cache_create(cache, size, flags);
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	if (kasan_enabled())
		__kasan_cache_create_kmalloc(cache);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (kasan_enabled())
		return __kasan_metadata_size(cache);
	return 0;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

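/*
 * Returns true when KASAN retains the object (for example in the generic
 * mode's quarantine), in which case the slab allocator must not free it
 * yet.
 */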
bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
					void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

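/*
 * Unpoison the first @size bytes of @object; in the tag-based modes the
 * returned pointer carries a fresh tag and is the one that must be handed
 * to the caller, while the rest of the object stays poisoned as a redzone.
 */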
void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
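/*
 * Illustrative use: bail out of a size query and let KASAN print a report
 * when the first byte of the object is not accessible:
 *
 *	if (ptr && !kasan_check_byte(ptr))
 *		return 0;
 */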
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}

#else /* CONFIG_KASAN */

static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					    void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

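/*
 * Record an auxiliary stack trace (for example the call_rcu() or
 * queue_work() call site) in the object's metadata so that it appears in
 * reports; the _noalloc variant never allocates from the stack depot and
 * can therefore be used from contexts that must not allocate.
 */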
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

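/*
 * Strip the tag from a pointer. Illustrative: in the tag-based modes the
 * tag lives in the pointer's top byte, so two pointers to the same object
 * may compare unequal until their tags are reset with kasan_reset_tag().
 */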
static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

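/*
 * Returns the pointer that callers must use for the unpoisoned region:
 * in the tag-based modes it may carry a new tag, so the return value,
 * not @start, is what has to be handed out.
 */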
void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						unsigned long size,
						kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
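/*
 * Called from the architecture's fault handler for non-canonical
 * addresses: prints a hint when the faulting address looks like the
 * result of an inline-instrumented shadow access for a wild or NULL
 * pointer.
 */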
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */