// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"
/*
 * Initialize Generic KASAN and enable runtime checks.
 * This should be called from arch kasan_init() once shadow memory is ready.
 */
void __init kasan_init_generic(void)
{
	kasan_enable();

	pr_info("KernelAddressSanitizer initialized (generic)\n");
}

/*
 * All functions below are always inlined so that the compiler can perform
 * better optimizations in each of __asan_loadX()/__asan_storeX() depending
 * on the memory access size X.
 */

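/*
 * Shadow encoding reminder: each shadow byte tracks one KASAN_GRANULE_SIZE
 * (8-byte) granule of memory. A value of 0 means the whole granule is
 * accessible, a value N in 1..7 means only the first N bytes are accessible,
 * and a negative value means the granule is fully poisoned. For example,
 * with a shadow value of 3, touching byte 3 or later of the granule is a
 * bug, which is what the last_accessible_byte >= shadow_value check below
 * detects.
 */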
static __always_inline bool memory_is_poisoned_1(const void *addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow(addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = (unsigned long)addr & KASAN_GRANULE_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_2_4_8(const void *addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow(addr);

	/*
	 * The access crosses an 8-byte granule boundary (one shadow byte
	 * covers one 8-byte granule). Such an access maps into 2 shadow
	 * bytes, so we need to check both of them.
	 */
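	/*
	 * For example, an 8-byte access starting at offset 5 within a granule
	 * touches bytes 5..12, i.e. it spills into the next granule and thus
	 * into the next shadow byte.
	 */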
	if (unlikely((((unsigned long)addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(const void *addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow(addr);

	/* An unaligned 16-byte access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED((unsigned long)addr, KASAN_GRANULE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

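/*
 * Scan the shadow range for a non-zero byte: short ranges are checked
 * byte-by-byte, longer ones are first aligned to 8 bytes and then scanned
 * one 64-bit word at a time. Returns the address of the first non-zero
 * shadow byte, or 0 if the whole range is zero.
 */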
static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}

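/*
 * The fast scan above only tells us that some shadow byte is non-zero. A
 * non-zero last shadow byte is not necessarily a bug: it may describe a
 * partially accessible granule, so recheck the offset of the last accessed
 * byte against that shadow value before deciding the range is poisoned.
 */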
static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow(addr),
			kasan_mem_to_shadow(addr + size - 1) + 1);

	if (unlikely(ret)) {
		const void *last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte);
		s8 last_accessible_byte = (unsigned long)last_byte & KASAN_GRANULE_MASK;

		if (unlikely(ret != (unsigned long)last_shadow ||
			     last_accessible_byte >= *last_shadow))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(const void *addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

static __always_inline bool check_region_inline(const void *addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (!kasan_enabled())
		return true;

	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	if (unlikely(!addr_has_metadata(addr)))
		return !kasan_report(addr, size, write, ret_ip);

	if (likely(!memory_is_poisoned(addr, size)))
		return true;

	return !kasan_report(addr, size, write, ret_ip);
}

bool kasan_check_range(const void *addr, size_t size, bool write,
		       unsigned long ret_ip)
{
	return check_region_inline(addr, size, write, ret_ip);
}

bool kasan_byte_accessible(const void *addr)
{
	s8 shadow_byte;

	if (!kasan_enabled())
		return true;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

	return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
	kasan_quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		kasan_quarantine_remove_cache(cache);
}

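/*
 * Globals are described by compiler-generated struct kasan_global records
 * passed to __asan_register_globals() below: unpoison the object itself and
 * poison the trailing redzone that the compiler reserved after it.
 */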
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);

	kasan_unpoison(global->beg, global->size, false);

	kasan_poison(global->beg + aligned_size,
		     global->size_with_redzone - aligned_size,
		     KASAN_GLOBAL_REDZONE, false);
}

void __asan_register_globals(void *ptr, ssize_t size)
{
	int i;
	struct kasan_global *globals = ptr;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(void *ptr, ssize_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

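/*
 * Instrumentation hooks: with outline instrumentation, the compiler emits a
 * call to __asan_loadX()/__asan_storeX() before each X-byte memory access.
 * The _noabort variants are plain aliases; reporting is handled the same way
 * in both cases.
 */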
#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(void *addr)				\
	{								\
		check_region_inline(addr, size, false, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(void *);			\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(void *addr)				\
	{								\
		check_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(void *);			\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(void *addr, ssize_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

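	/*
	 * Resulting layout: a left redzone of KASAN_ALLOCA_REDZONE_SIZE bytes
	 * precedes addr, the object itself stays accessible up to addr + size,
	 * and everything from there up to the end of the right redzone is
	 * poisoned.
	 */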
	WARN_ON(!IS_ALIGNED((unsigned long)addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison((const void *)(addr + rounded_down_size),
		       size - rounded_down_size, false);
	kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_LEFT, false);
	kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_RIGHT, false);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom)
{
	if (unlikely(!stack_top || stack_top > (void *)stack_bottom))
		return;

	kasan_unpoison(stack_top, (void *)stack_bottom - stack_top, false);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte)					\
	void __asan_set_shadow_##byte(const void *addr, ssize_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

/*
 * Adaptive redzone policy, taken from the userspace AddressSanitizer runtime:
 * the larger the allocation, the larger the redzone used for it.
 */
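/*
 * For example, a 128-byte object gets a 64-byte redzone, while a 4 KiB
 * object gets a 256-byte one.
 */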
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64 - 16 ? 16 :
		object_size <= 128 - 32 ? 32 :
		object_size <= 512 - 64 ? 64 :
		object_size <= 4096 - 128 ? 128 :
		object_size <= (1 << 14) - 256 ? 256 :
		object_size <= (1 << 15) - 512 ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;
	unsigned int rem_free_meta_size;
	unsigned int orig_alloc_meta_offset;

	if (!kasan_requires_meta())
		return;

	/*
	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN and
	 * that thus have per-object metadata. Currently, this flag is used in
	 * slab_ksize() to account for per-object metadata when calculating the
	 * size of the accessible memory within the object. Additionally, we use
	 * SLAB_NO_MERGE to prevent merging of caches with per-object metadata.
	 */
	*flags |= SLAB_KASAN | SLAB_NO_MERGE;

	ok_size = *size;

	/* Add alloc meta into the redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* If alloc meta doesn't fit, don't add it. */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	ok_size = *size;
	orig_alloc_meta_offset = cache->kasan_info.alloc_meta_offset;

	/*
	 * Store free meta in the redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation, or
	 * 3. It is from a kmalloc cache which enables the debug option
	 *    to store original size.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
	    slub_debug_orig_size(cache)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
		goto free_meta_added;
	}

	/*
	 * Otherwise, if the object is large enough to contain free meta,
	 * store it within the object.
	 */
	if (sizeof(struct kasan_free_meta) <= cache->object_size) {
		/* cache->kasan_info.free_meta_offset = 0 is implied. */
		goto free_meta_added;
	}

	/*
	 * For smaller objects, store the beginning of free meta within the
	 * object and the end in the redzone. And thus shift the location of
	 * alloc meta to free up space for free meta.
	 * This is only possible when slub_debug is disabled, as otherwise
	 * the end of free meta will overlap with slub_debug metadata.
	 */
	if (!__slub_debug_enabled()) {
		rem_free_meta_size = sizeof(struct kasan_free_meta) -
				     cache->object_size;
		*size += rem_free_meta_size;
		if (cache->kasan_info.alloc_meta_offset != 0)
			cache->kasan_info.alloc_meta_offset += rem_free_meta_size;
		goto free_meta_added;
	}

	/*
	 * If the object is small and slub_debug is enabled, store free meta
	 * in the redzone after alloc meta.
	 */
	cache->kasan_info.free_meta_offset = *size;
	*size += sizeof(struct kasan_free_meta);

free_meta_added:
	/* If free meta doesn't fit, don't add it. */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		cache->kasan_info.alloc_meta_offset = orig_alloc_meta_offset;
		*size = ok_size;
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE. */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta) {
		/* Zero out alloc meta to mark it as invalid. */
		__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	/*
	 * Explicitly marking free meta as invalid is not required: the shadow
	 * value for the first 8 bytes of a newly allocated object is not
	 * KASAN_SLAB_FREE_META.
	 */
}

static void release_alloc_meta(struct kasan_alloc_meta *meta)
{
	/* Zero out alloc meta to mark it as invalid. */
	__memset(meta, 0, sizeof(*meta));
}

static void release_free_meta(const void *object, struct kasan_free_meta *meta)
{
	/* Check if free meta is valid. */
	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
		return;

	/* Mark free meta as invalid. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
}

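/*
 * Size of KASAN metadata for an object of the given cache: with in_object
 * set, the part stored inside the object (free meta at offset 0, if any);
 * otherwise the part stored in the redzone (alloc meta plus, possibly,
 * free meta).
 */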
size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
{
	struct kasan_cache *info = &cache->kasan_info;

	if (!kasan_requires_meta())
		return 0;

	if (in_object)
		return (info->free_meta_offset ?
			0 : sizeof(struct kasan_free_meta));
	else
		return (info->alloc_meta_offset ?
			sizeof(struct kasan_alloc_meta) : 0) +
			((info->free_meta_offset &&
			  info->free_meta_offset != KASAN_NO_FREE_META) ?
			 sizeof(struct kasan_free_meta) : 0);
}

/*
 * This function avoids dynamic memory allocations and thus can be called from
 * contexts that do not allow allocating memory.
 */
void kasan_record_aux_stack(void *addr)
{
	struct slab *slab = kasan_addr_to_slab(addr);
	struct kmem_cache *cache;
	struct kasan_alloc_meta *alloc_meta;
	void *object;

	if (is_kfence_address(addr) || !slab)
		return;

	cache = slab->slab_cache;
	object = nearest_obj(cache, slab, addr);
	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

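	/* Keep the two most recent auxiliary stacks; the older one is dropped. */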
	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
	alloc_meta->aux_stack[0] = kasan_save_stack(0, 0);
}

void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	/* Invalidate previous stack traces (might exist for krealloc or mempool). */
	release_alloc_meta(alloc_meta);

	kasan_save_track(&alloc_meta->alloc_track, flags);
}

void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
	struct kasan_free_meta *free_meta;

	free_meta = kasan_get_free_meta(cache, object);
	if (!free_meta)
		return;

	/* Invalidate previous stack trace (might exist for mempool). */
	release_free_meta(object, free_meta);

	kasan_save_track(&free_meta->free_track, 0);

	/* Mark free meta as valid. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE_META;
}