linux/mm/kasan/kasan.c
/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

void kasan_enable_current(void)
{
        current->kasan_depth++;
}

void kasan_disable_current(void)
{
        current->kasan_depth--;
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
        void *shadow_start, *shadow_end;

        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);

        memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
        kasan_poison_shadow(address, size, 0);

        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
                *shadow = size & KASAN_SHADOW_MASK;
        }
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
        void *base = task_stack_page(task);
        size_t size = sp - base;

        kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        /*
         * Calculate the task stack base address.  Avoid using 'current'
         * because this function is called by early resume code which hasn't
         * yet set up the percpu register (%gs).
         */
        void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

        kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
        const void *sp = __builtin_frame_address(0);
        size_t size = watermark - sp;

        if (WARN_ON(sp > watermark))
                return;
        kasan_unpoison_shadow(sp, size);
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending
 * on the memory access size X.
 */

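/*
 * Shadow encoding: each shadow byte tracks one KASAN_SHADOW_SCALE_SIZE
 * (8-byte) granule of memory.  0 means the whole granule is accessible,
 * a value 1..7 means only the first N bytes of the granule are
 * accessible, and a negative (s8) value marks the granule as fully
 * poisoned, with the specific value encoding the reason (redzone, freed
 * object, ...).  The helpers below compare the accessed byte's offset
 * within its granule against this encoding.
 */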
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
        s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(shadow_value)) {
                s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
                return unlikely(last_accessible_byte >= shadow_value);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
                                                unsigned long size)
{
        u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

        /*
         * The access crosses an 8(shadow size)-byte boundary. Such an access
         * maps into two shadow bytes, so we need to check them both.
         */
        if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
                return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

        return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        /* An unaligned 16-byte access maps into 3 shadow bytes. */
        if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                return *shadow_addr || memory_is_poisoned_1(addr + 15);

        return *shadow_addr;
}

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
                                        size_t size)
{
        while (size) {
                if (unlikely(*start))
                        return (unsigned long)start;
                start++;
                size--;
        }

        return 0;
}

static __always_inline unsigned long memory_is_nonzero(const void *start,
                                                const void *end)
{
        unsigned int words;
        unsigned long ret;
        unsigned int prefix = (unsigned long)start % 8;

        if (end - start <= 16)
                return bytes_is_nonzero(start, end - start);

        if (prefix) {
                prefix = 8 - prefix;
                ret = bytes_is_nonzero(start, prefix);
                if (unlikely(ret))
                        return ret;
                start += prefix;
        }

        words = (end - start) / 8;
        while (words) {
                if (unlikely(*(u64 *)start))
                        return bytes_is_nonzero(start, 8);
                start += 8;
                words--;
        }

        return bytes_is_nonzero(start, (end - start) % 8);
}

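/*
 * Generic check for an N-byte access: scan all shadow bytes covering
 * [addr, addr + size).  A nonzero shadow byte is only acceptable when it
 * is the last one and the final byte of the access still falls within
 * the accessible prefix that this shadow byte encodes.
 */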
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
                                                size_t size)
{
        unsigned long ret;

        ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
                        kasan_mem_to_shadow((void *)addr + size - 1) + 1);

        if (unlikely(ret)) {
                unsigned long last_byte = addr + size - 1;
                s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

                if (unlikely(ret != (unsigned long)last_shadow ||
                        ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
                        return true;
        }
        return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
        if (__builtin_constant_p(size)) {
                switch (size) {
                case 1:
                        return memory_is_poisoned_1(addr);
                case 2:
                case 4:
                case 8:
                        return memory_is_poisoned_2_4_8(addr, size);
                case 16:
                        return memory_is_poisoned_16(addr);
                default:
                        BUILD_BUG();
                }
        }

        return memory_is_poisoned_n(addr, size);
}

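/*
 * Core check behind every instrumented access: zero-sized accesses are
 * ignored, addresses below the shadow-mapped range are reported as wild
 * accesses right away, and everything else is validated against the
 * shadow.  Poisoned accesses are reported with the caller's return
 * address so the report points at the instrumented code.
 */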
static __always_inline void check_memory_region_inline(unsigned long addr,
                                                size_t size, bool write,
                                                unsigned long ret_ip)
{
        if (unlikely(size == 0))
                return;

        if (unlikely((void *)addr <
                kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
                kasan_report(addr, size, write, ret_ip);
                return;
        }

        if (likely(!memory_is_poisoned(addr, size)))
                return;

        kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
                                size_t size, bool write,
                                unsigned long ret_ip)
{
        check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const volatile void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const volatile void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

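/*
 * KASAN builds route memset/memmove/memcpy through these wrappers (the
 * #undefs drop any architecture macro versions): the source region is
 * checked as a read, the destination as a write, and the actual work is
 * then done by the uninstrumented __mem* implementations.
 */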
#undef memset
void *memset(void *addr, int c, size_t len)
{
        check_memory_region((unsigned long)addr, len, true, _RET_IP_);

        return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison_shadow(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations, larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
        int rz =
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
        return rz;
}

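/*
 * Lay out KASAN metadata for a cache at creation time: allocation
 * metadata is appended after the object, free metadata is appended as
 * well when it cannot live inside the freed object (RCU caches, caches
 * with constructors, or objects smaller than struct kasan_free_meta),
 * and the object size is then grown towards the adaptive redzone target
 * (e.g. a 96-byte object aims for a 32-byte redzone), capped at
 * KMALLOC_MAX_SIZE.  If the metadata offsets no longer fit in the final
 * size, the cache is left without SLAB_KASAN.
 */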
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
                        unsigned long *flags)
{
        int redzone_adjust;
        int orig_size = *size;

        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* Add free meta. */
        if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
            cache->object_size < sizeof(struct kasan_free_meta)) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }
        redzone_adjust = optimal_redzone(cache->object_size) -
                (*size - cache->object_size);

        if (redzone_adjust > 0)
                *size += redzone_adjust;

        *size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
                                        optimal_redzone(cache->object_size)));

        /*
         * If the metadata doesn't fit, don't enable KASAN at all.
         */
        if (*size <= cache->kasan_info.alloc_meta_offset ||
                        *size <= cache->kasan_info.free_meta_offset) {
                cache->kasan_info.alloc_meta_offset = 0;
                cache->kasan_info.free_meta_offset = 0;
                *size = orig_size;
                return;
        }

        *flags |= SLAB_KASAN;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
        quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
        quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
                (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
        kasan_poison_shadow(page_address(page),
                        PAGE_SIZE << compound_order(page),
                        KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
        return (ptr >= (unsigned long)&__irqentry_text_start &&
                ptr < (unsigned long)&__irqentry_text_end) ||
                (ptr >= (unsigned long)&__softirqentry_text_start &&
                 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
        int i;

        if (!trace->nr_entries)
                return;
        for (i = 0; i < trace->nr_entries; i++)
                if (in_irqentry_text(trace->entries[i])) {
                        /* Include the irqentry function into the stack. */
                        trace->nr_entries = i + 1;
                        break;
                }
}

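/*
 * Capture the current call trace for alloc/free tracking: the trace is
 * cut at the first hardirq/softirq entry so frames of the interrupted
 * context are not mixed in, a trailing ULONG_MAX end marker is dropped,
 * and the result is deduplicated into a compact stackdepot handle.
 */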
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = KASAN_STACK_DEPTH,
                .skip = 0
        };

        save_stack_trace(&trace);
        filter_irq_stacks(&trace);
        if (trace.nr_entries != 0 &&
            trace.entries[trace.nr_entries-1] == ULONG_MAX)
                trace.nr_entries--;

        return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
                                        const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
                                      const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
        struct kasan_alloc_meta *alloc_info;

        if (!(cache->flags & SLAB_KASAN))
                return;

        alloc_info = get_alloc_info(cache, object);
        __memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
        kasan_kmalloc(cache, object, cache->object_size, flags);
}

static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
        unsigned long size = cache->object_size;
        unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return;

        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

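/*
 * Free-side hook: an object whose first shadow byte says it is not
 * currently accessible is treated as a double-free or invalid-free and
 * reported; otherwise the object is poisoned and, for SLAB_KASAN caches,
 * the free stack is recorded and the object is parked in the quarantine.
 * A true return tells the slab allocator not to free the object right
 * away, since the quarantine now owns it.
 */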
bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
        s8 shadow_byte;

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;

        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
        if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
                kasan_report_double_free(cache, object,
                                __builtin_return_address(1));
                return true;
        }

        kasan_poison_slab_free(cache, object);

        if (unlikely(!(cache->flags & SLAB_KASAN)))
                return false;

        set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
        quarantine_put(get_free_info(cache, object), cache);
        return true;
}

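/*
 * Mark a (k)malloc-ed object: the 'size' bytes actually requested are
 * unpoisoned, the remainder of the slab object up to object_size is
 * poisoned as a right redzone, and the allocation stack is recorded for
 * SLAB_KASAN caches.  Blocking allocations also give the quarantine a
 * chance to shrink first.
 */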
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
                   gfp_t flags)
{
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(object == NULL))
                return;

        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(object, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);

        if (cache->flags & SLAB_KASAN)
                set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(ptr == NULL))
                return;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                kasan_kmalloc_large(object, size, flags);
        else
                kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page)))
                kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                                KASAN_FREE_PAGE);
        else
                kasan_poison_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
        struct page *page = virt_to_page(ptr);

        kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                        KASAN_FREE_PAGE);
}

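/*
 * Back a freshly allocated module mapping with shadow memory: the shadow
 * covering [addr, addr + size) is vmalloc-ed (zero-initialized, so the
 * region starts out unpoisoned) and the module's vm_area is flagged
 * VM_KASAN so kasan_free_shadow() can release the shadow together with
 * the mapping.
 */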
int kasan_module_alloc(void *addr, size_t size)
{
        void *ret;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
                        PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL | __GFP_ZERO,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                find_vm_area(addr)->flags |= VM_KASAN;
                kmemleak_ignore(ret);
                return 0;
        }

        return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}

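/*
 * Globals instrumentation: the compiler pads each instrumented global
 * with a redzone and describes it with a struct kasan_global.  The
 * global itself is unpoisoned and the tail of its redzone-padded
 * allocation (up to size_with_redzone) is poisoned so out-of-bounds
 * accesses to the global are caught.
 */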
static void register_global(struct kasan_global *global)
{
        size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(global->beg, global->size);

        kasan_poison_shadow(global->beg + aligned_size,
                global->size_with_redzone - aligned_size,
                KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
        int i;

        for (i = 0; i < size; i++)
                register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

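/*
 * Entry points for compiler-generated (outline) instrumentation: the
 * macro below expands to __asan_load##size()/__asan_store##size() for
 * the fixed access sizes, plus *_noabort aliases that some compiler
 * versions emit; in the kernel both variants are the same function.
 */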
#define DEFINE_ASAN_LOAD_STORE(size)                                    \
        void __asan_load##size(unsigned long addr)                      \
        {                                                               \
                check_memory_region_inline(addr, size, false, _RET_IP_);\
        }                                                               \
        EXPORT_SYMBOL(__asan_load##size);                               \
        __alias(__asan_load##size)                                      \
        void __asan_load##size##_noabort(unsigned long);                \
        EXPORT_SYMBOL(__asan_load##size##_noabort);                     \
        void __asan_store##size(unsigned long addr)                     \
        {                                                               \
                check_memory_region_inline(addr, size, true, _RET_IP_); \
        }                                                               \
        EXPORT_SYMBOL(__asan_store##size);                              \
        __alias(__asan_store##size)                                     \
        void __asan_store##size##_noabort(unsigned long);               \
        EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison large objects when they go out of scope. */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
        /*
         * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
         * by redzones, so we simply round up size to simplify logic.
         */
        kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
                            KASAN_USE_AFTER_SCOPE);
}
EXPORT_SYMBOL(__asan_poison_stack_memory);

/* Emitted by compiler to unpoison large objects when they go into scope. */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
        kasan_unpoison_shadow(addr, size);
}
EXPORT_SYMBOL(__asan_unpoison_stack_memory);

#ifdef CONFIG_MEMORY_HOTPLUG
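/*
 * Memory hotplug: shadow for hotplugged memory is allocated from the
 * vmalloc area when a block goes online and freed again when it goes
 * offline.  The block must map onto whole shadow pages, hence the
 * alignment checks below.
 */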
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        struct memory_notify *mem_data = data;
        unsigned long nr_shadow_pages, start_kaddr, shadow_start;
        unsigned long shadow_end, shadow_size;

        nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
        start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
        shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
        shadow_size = nr_shadow_pages << PAGE_SHIFT;
        shadow_end = shadow_start + shadow_size;

        if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
                WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
                return NOTIFY_BAD;

        switch (action) {
        case MEM_GOING_ONLINE: {
                void *ret;

                ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
                                        shadow_end, GFP_KERNEL,
                                        PAGE_KERNEL, VM_NO_GUARD,
                                        pfn_to_nid(mem_data->start_pfn),
                                        __builtin_return_address(0));
                if (!ret)
                        return NOTIFY_BAD;

                kmemleak_ignore(ret);
                return NOTIFY_OK;
        }
        case MEM_OFFLINE:
                vfree((void *)shadow_start);
        }

        return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

module_init(kasan_memhotplug_init);
#endif