linux/mm/kasan/kasan.c
/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "kasan.h"
#include "../slab.h"

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
        void *shadow_start, *shadow_end;

        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);

        memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
        kasan_poison_shadow(address, size, 0);

        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
                *shadow = size & KASAN_SHADOW_MASK;
        }
}
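
/*
 * Note (illustrative, assuming the generic KASAN granule size of 8 bytes):
 * unpoisoning 13 bytes at an 8-byte-aligned address zeroes one whole
 * shadow byte (covering bytes 0..7) and then writes 13 & KASAN_SHADOW_MASK
 * == 5 into the following shadow byte, so only bytes 8..12 of that second
 * granule are marked accessible.
 */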

static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
{
        void *base = task_stack_page(task);
        size_t size = sp - base;

        kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_remaining_stack(void *sp)
{
        __kasan_unpoison_stack(current, sp);
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
        s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(shadow_value)) {
                s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
                return unlikely(last_accessible_byte >= shadow_value);
        }

        return false;
}
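
/*
 * Illustrative example: a shadow value of 5 means only the first 5 bytes
 * of the granule are accessible, so a 1-byte access at in-granule offset
 * 5, 6 or 7 satisfies last_accessible_byte >= shadow_value and is
 * reported as poisoned.
 */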

static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 1))
                        return true;

                /*
                 * If a single shadow byte covers the 2-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 3))
                        return true;

                /*
                 * If a single shadow byte covers the 4-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 7))
                        return true;

                /*
                 * If a single shadow byte covers the 8-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}
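
/*
 * Note on the 2-, 4- and 8-byte variants above: memory_is_poisoned_1()
 * checks the last byte of the access. When the access fits entirely
 * within one 8-byte granule that check is sufficient; when it crosses
 * into the next granule, the first granule is accessed up to its last
 * byte, which is only valid if its shadow byte is zero - hence the
 * final *(u8 *)shadow_addr test.
 */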

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
        u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                u16 shadow_first_bytes = *(u16 *)shadow_addr;

                if (unlikely(shadow_first_bytes))
                        return true;

                /*
                 * If two shadow bytes cover the 16-byte access, we don't
                 * need to do anything more. Otherwise, test the last
                 * shadow byte.
                 */
                if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                        return false;

                return memory_is_poisoned_1(addr + 15);
        }

        return false;
}

static __always_inline unsigned long bytes_is_zero(const u8 *start,
                                        size_t size)
{
        while (size) {
                if (unlikely(*start))
                        return (unsigned long)start;
                start++;
                size--;
        }

        return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
                                                const void *end)
{
        unsigned int words;
        unsigned long ret;
        unsigned int prefix = (unsigned long)start % 8;

        if (end - start <= 16)
                return bytes_is_zero(start, end - start);

        if (prefix) {
                prefix = 8 - prefix;
                ret = bytes_is_zero(start, prefix);
                if (unlikely(ret))
                        return ret;
                start += prefix;
        }

        words = (end - start) / 8;
        while (words) {
                if (unlikely(*(u64 *)start))
                        return bytes_is_zero(start, 8);
                start += 8;
                words--;
        }

        return bytes_is_zero(start, (end - start) % 8);
}
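
/*
 * memory_is_zero() scans an arbitrary shadow range: a byte-wise prefix up
 * to the next 8-byte boundary, then whole 64-bit words, then a byte-wise
 * tail. It returns the address of the first non-zero shadow byte, or 0 if
 * the whole range is clean (ranges of 16 bytes or less are scanned
 * byte-wise directly).
 */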

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
                                                size_t size)
{
        unsigned long ret;

        ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
                        kasan_mem_to_shadow((void *)addr + size - 1) + 1);

        if (unlikely(ret)) {
                unsigned long last_byte = addr + size - 1;
                s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

                if (unlikely(ret != (unsigned long)last_shadow ||
                        ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
                        return true;
        }
        return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
        if (__builtin_constant_p(size)) {
                switch (size) {
                case 1:
                        return memory_is_poisoned_1(addr);
                case 2:
                        return memory_is_poisoned_2(addr);
                case 4:
                        return memory_is_poisoned_4(addr);
                case 8:
                        return memory_is_poisoned_8(addr);
                case 16:
                        return memory_is_poisoned_16(addr);
                default:
                        BUILD_BUG();
                }
        }

        return memory_is_poisoned_n(addr, size);
}


static __always_inline void check_memory_region(unsigned long addr,
                                                size_t size, bool write)
{
        if (unlikely(size == 0))
                return;

        if (unlikely((void *)addr <
                kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
                kasan_report(addr, size, write, _RET_IP_);
                return;
        }

        if (likely(!memory_is_poisoned(addr, size)))
                return;

        kasan_report(addr, size, write, _RET_IP_);
}

void __asan_loadN(unsigned long addr, size_t size);
void __asan_storeN(unsigned long addr, size_t size);

#undef memset
void *memset(void *addr, int c, size_t len)
{
        __asan_storeN((unsigned long)addr, len);

        return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        __asan_loadN((unsigned long)src, len);
        __asan_storeN((unsigned long)dest, len);

        return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        __asan_loadN((unsigned long)src, len);
        __asan_storeN((unsigned long)dest, len);

        return __memcpy(dest, src, len);
}
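
/*
 * The interceptors above assume the architecture provides uninstrumented
 * __memset/__memmove/__memcpy implementations for KASAN builds: the
 * instrumented wrappers validate the source and/or destination ranges via
 * __asan_loadN/__asan_storeN and then delegate to them.
 */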

void kasan_alloc_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison_shadow(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
}

#ifdef CONFIG_SLAB
/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
        int rz =
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
        return rz;
}
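
/*
 * Illustrative example: a 100-byte object is larger than 128 - 32 == 96,
 * so it falls through to the "object_size <= 512 - 64" bucket and gets a
 * 64-byte redzone.
 */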

void kasan_cache_create(struct kmem_cache *cache, size_t *size,
                        unsigned long *flags)
{
        int redzone_adjust;
        /* Make sure the adjusted size is still less than
         * KMALLOC_MAX_CACHE_SIZE.
         * TODO: this check is only useful for SLAB, but not SLUB. We'll need
         * to skip it for SLUB when it starts using kasan_cache_create().
         */
        if (*size > KMALLOC_MAX_CACHE_SIZE -
            sizeof(struct kasan_alloc_meta) -
            sizeof(struct kasan_free_meta))
                return;
        *flags |= SLAB_KASAN;
        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* Add free meta. */
        if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
            cache->object_size < sizeof(struct kasan_free_meta)) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }
        redzone_adjust = optimal_redzone(cache->object_size) -
                (*size - cache->object_size);
        if (redzone_adjust > 0)
                *size += redzone_adjust;
        *size = min(KMALLOC_MAX_CACHE_SIZE,
                    max(*size,
                        cache->object_size +
                        optimal_redzone(cache->object_size)));
}
#endif
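
/*
 * Resulting slab object layout (sketch): the object itself, followed by
 * struct kasan_alloc_meta, optionally struct kasan_free_meta (for caches
 * where the freed object cannot be reused to hold it, e.g. RCU caches or
 * caches with constructors), and finally enough padding to reach the
 * redzone size picked by optimal_redzone(), with the total capped at
 * KMALLOC_MAX_CACHE_SIZE.
 */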

void kasan_poison_slab(struct page *page)
{
        kasan_poison_shadow(page_address(page),
                        PAGE_SIZE << compound_order(page),
                        KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
#ifdef CONFIG_SLAB
        if (cache->flags & SLAB_KASAN) {
                struct kasan_alloc_meta *alloc_info =
                        get_alloc_info(cache, object);
                alloc_info->state = KASAN_STATE_INIT;
        }
#endif
}

#ifdef CONFIG_SLAB
static inline int in_irqentry_text(unsigned long ptr)
{
        return (ptr >= (unsigned long)&__irqentry_text_start &&
                ptr < (unsigned long)&__irqentry_text_end) ||
                (ptr >= (unsigned long)&__softirqentry_text_start &&
                 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
        int i;

        if (!trace->nr_entries)
                return;
        for (i = 0; i < trace->nr_entries; i++)
                if (in_irqentry_text(trace->entries[i])) {
                        /* Include the irqentry function into the stack. */
                        trace->nr_entries = i + 1;
                        break;
                }
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = KASAN_STACK_DEPTH,
                .skip = 0
        };

        save_stack_trace(&trace);
        filter_irq_stacks(&trace);
        if (trace.nr_entries != 0 &&
            trace.entries[trace.nr_entries-1] == ULONG_MAX)
                trace.nr_entries--;

        return depot_save_stack(&trace, flags);
}
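
/*
 * depot_save_stack() deduplicates the (irq-filtered) trace in the stack
 * depot and returns a compact depot_stack_handle_t, so each kasan_track
 * below only needs to store a pid and a handle rather than a full stack
 * trace.
 */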

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
                                        const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
                                      const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        return (void *)object + cache->kasan_info.free_meta_offset;
}
#endif

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
        kasan_kmalloc(cache, object, cache->object_size, flags);
}

void kasan_slab_free(struct kmem_cache *cache, void *object)
{
        unsigned long size = cache->object_size;
        unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
                return;

#ifdef CONFIG_SLAB
        if (cache->flags & SLAB_KASAN) {
                struct kasan_free_meta *free_info =
                        get_free_info(cache, object);
                struct kasan_alloc_meta *alloc_info =
                        get_alloc_info(cache, object);
                alloc_info->state = KASAN_STATE_FREE;
                set_track(&free_info->track, GFP_NOWAIT);
        }
#endif

        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
                   gfp_t flags)
{
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (unlikely(object == NULL))
                return;

        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(object, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);
#ifdef CONFIG_SLAB
        if (cache->flags & SLAB_KASAN) {
                struct kasan_alloc_meta *alloc_info =
                        get_alloc_info(cache, object);

                alloc_info->state = KASAN_STATE_ALLOC;
                alloc_info->alloc_size = size;
                set_track(&alloc_info->track, flags);
        }
#endif
}
EXPORT_SYMBOL(kasan_kmalloc);
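
/*
 * Illustrative example (granule size 8): a kmalloc(20) served from a
 * 32-byte cache unpoisons bytes 0..19 of the object (the third shadow
 * byte gets the partial value 20 & KASAN_SHADOW_MASK == 4), then poisons
 * the rounded-up range [24, 32) as KASAN_KMALLOC_REDZONE; bytes 20..23
 * are already blocked by the partial shadow value.
 */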

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (unlikely(ptr == NULL))
                return;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                kasan_kmalloc_large(object, size, flags);
        else
                kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_kfree(void *ptr)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page)))
                kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                                KASAN_FREE_PAGE);
        else
                kasan_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
        struct page *page = virt_to_page(ptr);

        kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                        KASAN_FREE_PAGE);
}

int kasan_module_alloc(void *addr, size_t size)
{
        void *ret;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
                        PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                find_vm_area(addr)->flags |= VM_KASAN;
                kmemleak_ignore(ret);
                return 0;
        }

        return -ENOMEM;
}
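
/*
 * Illustrative example: for a 1 MB module mapping, the shadow spans
 * 1 MB >> KASAN_SHADOW_SCALE_SHIFT == 128 KB (rounded up to whole pages)
 * and is vmalloc'ed directly at the matching shadow address, so no later
 * remapping is needed.
 */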

void kasan_free_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
        size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(global->beg, global->size);

        kasan_poison_shadow(global->beg + aligned_size,
                global->size_with_redzone - aligned_size,
                KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
        int i;

        for (i = 0; i < size; i++)
                register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)                            \
        void __asan_load##size(unsigned long addr)              \
        {                                                       \
                check_memory_region(addr, size, false);         \
        }                                                       \
        EXPORT_SYMBOL(__asan_load##size);                       \
        __alias(__asan_load##size)                              \
        void __asan_load##size##_noabort(unsigned long);        \
        EXPORT_SYMBOL(__asan_load##size##_noabort);             \
        void __asan_store##size(unsigned long addr)             \
        {                                                       \
                check_memory_region(addr, size, true);          \
        }                                                       \
        EXPORT_SYMBOL(__asan_store##size);                      \
        __alias(__asan_store##size)                             \
        void __asan_store##size##_noabort(unsigned long);       \
        EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
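
/*
 * For reference, DEFINE_ASAN_LOAD_STORE(1) expands to __asan_load1() and
 * __asan_store1(), each forwarding to check_memory_region() with a
 * compile-time-constant size (so memory_is_poisoned() takes the fixed-size
 * fast path), plus exported __asan_load1_noabort/__asan_store1_noabort
 * aliases for the compiler's *_noabort instrumentation.
 */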

void __asan_loadN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, false);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, true);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
        pr_err("WARNING: KASAN doesn't support memory hot-add\n");
        pr_err("Memory hot-add will be disabled\n");

        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

module_init(kasan_memhotplug_init);
#endif