// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so that the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending on
 * the memory access size X.
 */

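/*
 * Each shadow byte tracks one KASAN_GRANULE_SIZE-aligned granule of memory:
 * 0 means the whole granule is accessible, a value from 1 to
 * KASAN_GRANULE_SIZE - 1 means only the first N bytes are accessible, and
 * a negative value marks the whole granule as poisoned.
 */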
static __always_inline bool memory_is_poisoned_1(const void *addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow(addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = (unsigned long)addr & KASAN_GRANULE_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_2_4_8(const void *addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow(addr);

	/*
	 * Access crosses 8(shadow size)-byte boundary. Such access maps
	 * into 2 shadow bytes, so we need to check them both.
	 */
	if (unlikely((((unsigned long)addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(const void *addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow(addr);

	/* Unaligned 16-bytes access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED((unsigned long)addr, KASAN_GRANULE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

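/* Return the address of the first non-zero byte in [start, start + size), or 0. */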
static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

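/*
 * Find the first non-zero shadow byte in [start, end): check the unaligned
 * head and tail byte by byte, and the aligned middle part one 64-bit word
 * at a time.
 */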
static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}

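/*
 * Check an access of arbitrary size: all shadow bytes covering the access
 * must be zero, except possibly the last one, which may describe a partial
 * granule that still covers the last accessed byte.
 */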
static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow(addr),
			kasan_mem_to_shadow(addr + size - 1) + 1);

	if (unlikely(ret)) {
		const void *last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte);
		s8 last_accessible_byte = (unsigned long)last_byte & KASAN_GRANULE_MASK;

		if (unlikely(ret != (unsigned long)last_shadow ||
			     last_accessible_byte >= *last_shadow))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(const void *addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

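/*
 * Check whether an access of @size bytes at @addr is valid. Returns true if
 * the access is allowed (or cannot be checked yet) and false once a bad
 * access has been reported.
 */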
static __always_inline bool check_region_inline(const void *addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (!kasan_arch_is_ready())
		return true;

	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	if (unlikely(!addr_has_metadata(addr)))
		return !kasan_report(addr, size, write, ret_ip);

	if (likely(!memory_is_poisoned(addr, size)))
		return true;

	return !kasan_report(addr, size, write, ret_ip);
}

bool kasan_check_range(const void *addr, size_t size, bool write,
					unsigned long ret_ip)
{
	return check_region_inline(addr, size, write, ret_ip);
}

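/* Return true if the byte at @addr is accessible according to its shadow byte. */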
bool kasan_byte_accessible(const void *addr)
{
	s8 shadow_byte;

	if (!kasan_arch_is_ready())
		return true;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

	return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}

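/* Drop quarantined objects of @cache when the cache is shrunk. */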
void kasan_cache_shrink(struct kmem_cache *cache)
{
	kasan_quarantine_remove_cache(cache);
}

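/* Drop quarantined objects of @cache before the cache is destroyed. */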
void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		kasan_quarantine_remove_cache(cache);
}

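/*
 * Unpoison the global variable itself and poison the compiler-added
 * redzone that follows it.
 */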
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);

	kasan_unpoison(global->beg, global->size, false);

	kasan_poison(global->beg + aligned_size,
		     global->size_with_redzone - aligned_size,
		     KASAN_GLOBAL_REDZONE, false);
}

void __asan_register_globals(void *ptr, ssize_t size)
{
	int i;
	struct kasan_global *globals = ptr;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(void *ptr, ssize_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

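/*
 * Instrumentation entry points emitted by the compiler for fixed-size loads
 * and stores. The _noabort variants are plain aliases, since the kernel
 * does not abort on a KASAN report.
 */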
#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(void *addr)				\
	{								\
		check_region_inline(addr, size, false, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(void *);			\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(void *addr)				\
	{								\
		check_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(void *);			\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(void *addr, ssize_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED((unsigned long)addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison((const void *)(addr + rounded_down_size),
		       size - rounded_down_size, false);
	kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_LEFT, false);
	kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_RIGHT, false);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom)
{
	if (unlikely(!stack_top || stack_top > (void *)stack_bottom))
		return;

	kasan_unpoison(stack_top, (void *)stack_bottom - stack_top, false);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte)					\
	void __asan_set_shadow_##byte(const void *addr, ssize_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

/* Only allow cache merging when no per-object metadata is present. */
slab_flags_t kasan_never_merge(void)
{
	if (!kasan_requires_meta())
		return 0;
	return SLAB_KASAN;
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;

	if (!kasan_requires_meta())
		return;

	/*
	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN
	 * and that thus have per-object metadata.
	 * Currently this flag is used in two places:
	 * 1. In slab_ksize() to account for per-object metadata when
	 *    calculating the size of the accessible memory within the object.
	 * 2. In slab_common.c via kasan_never_merge() to prevent merging of
	 *    caches with per-object metadata.
	 */
	*flags |= SLAB_KASAN;

	ok_size = *size;

	/* Add alloc meta into redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/*
	 * If alloc meta doesn't fit, don't add it.
	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
	 * larger sizes.
	 */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	/*
	 * Add free meta into redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation, or
	 * 3. Object is too small.
	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		ok_size = *size;

		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);

		/* If free meta doesn't fit, don't add it. */
		if (*size > KMALLOC_MAX_SIZE) {
			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
			*size = ok_size;
		}
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}

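/* Return the object's allocation metadata stored in the redzone, or NULL if the cache has none. */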
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		__memset(alloc_meta, 0, sizeof(*alloc_meta));
}

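/*
 * Return the size of the per-object KASAN metadata: the part stored inside
 * the object if @in_object is true, otherwise the part stored in the
 * redzone beyond the object.
 */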
size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
{
	struct kasan_cache *info = &cache->kasan_info;

	if (!kasan_requires_meta())
		return 0;

	if (in_object)
		return (info->free_meta_offset ?
			0 : sizeof(struct kasan_free_meta));
	else
		return (info->alloc_meta_offset ?
			sizeof(struct kasan_alloc_meta) : 0) +
			((info->free_meta_offset &&
			  info->free_meta_offset != KASAN_NO_FREE_META) ?
			 sizeof(struct kasan_free_meta) : 0);
}

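/*
 * Record an auxiliary stack trace (e.g. the call_rcu() or queue_work()
 * caller) in the object's alloc metadata; the two most recent aux stacks
 * are kept.
 */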
static void __kasan_record_aux_stack(void *addr, bool can_alloc)
{
	struct slab *slab = kasan_addr_to_slab(addr);
	struct kmem_cache *cache;
	struct kasan_alloc_meta *alloc_meta;
	void *object;

	if (is_kfence_address(addr) || !slab)
		return;

	cache = slab->slab_cache;
	object = nearest_obj(cache, slab, addr);
	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
	alloc_meta->aux_stack[0] = kasan_save_stack(0, can_alloc);
}

void kasan_record_aux_stack(void *addr)
{
	return __kasan_record_aux_stack(addr, true);
}

void kasan_record_aux_stack_noalloc(void *addr)
{
	return __kasan_record_aux_stack(addr, false);
}

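/* Save the current stack trace as the object's allocation track. */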
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		kasan_set_track(&alloc_meta->alloc_track, flags);
}

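/*
 * Save the current stack trace as the object's free track and mark the
 * object's shadow so that reports know a free track is present.
 */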
void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
	struct kasan_free_meta *free_meta;

	free_meta = kasan_get_free_meta(cache, object);
	if (!free_meta)
		return;

	kasan_set_track(&free_meta->free_track, 0);
	/* The object was freed and has free track set. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREETRACK;
}