linux/mm/kasan/generic.c
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so that the compiler can
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

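/*
 * Generic KASAN tracks memory state in shadow memory: one shadow byte per
 * KASAN_GRANULE_SIZE (8) bytes. A shadow value of 0 means the whole granule
 * is accessible, a value of 1..7 means only the first N bytes are accessible,
 * and a negative value marks the granule as poisoned (the value encodes why).
 * The memory_is_poisoned_*() helpers below check an access of the given size
 * against this encoding.
 */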
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
        s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(shadow_value)) {
                s8 last_accessible_byte = addr & KASAN_GRANULE_MASK;
                return unlikely(last_accessible_byte >= shadow_value);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
                                                unsigned long size)
{
        u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

        /*
         * Access crosses 8(shadow size)-byte boundary. Such access maps
         * into 2 shadow bytes, so we need to check them both.
         */
        if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
                return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

        return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        /* Unaligned 16-bytes access maps into 3 shadow bytes. */
        if (unlikely(!IS_ALIGNED(addr, KASAN_GRANULE_SIZE)))
                return *shadow_addr || memory_is_poisoned_1(addr + 15);

        return *shadow_addr;
}

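/*
 * Return the address of the first non-zero byte in [start, start + size),
 * or 0 if all bytes are zero.
 */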
static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
                                        size_t size)
{
        while (size) {
                if (unlikely(*start))
                        return (unsigned long)start;
                start++;
                size--;
        }

        return 0;
}

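/*
 * Like bytes_is_nonzero(), but for [start, end): scan an unaligned prefix
 * byte by byte, then the aligned middle part eight bytes at a time, then
 * the remaining tail byte by byte.
 */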
static __always_inline unsigned long memory_is_nonzero(const void *start,
                                                const void *end)
{
        unsigned int words;
        unsigned long ret;
        unsigned int prefix = (unsigned long)start % 8;

        if (end - start <= 16)
                return bytes_is_nonzero(start, end - start);

        if (prefix) {
                prefix = 8 - prefix;
                ret = bytes_is_nonzero(start, prefix);
                if (unlikely(ret))
                        return ret;
                start += prefix;
        }

        words = (end - start) / 8;
        while (words) {
                if (unlikely(*(u64 *)start))
                        return bytes_is_nonzero(start, 8);
                start += 8;
                words--;
        }

        return bytes_is_nonzero(start, (end - start) % 8);
}

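/*
 * Check an access of arbitrary size: scan the whole shadow range covering it.
 * A non-zero shadow byte is only acceptable if it is the last shadow byte of
 * the range and the access ends within the accessible prefix it encodes.
 */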
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
                                                size_t size)
{
        unsigned long ret;

        ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
                        kasan_mem_to_shadow((void *)addr + size - 1) + 1);

        if (unlikely(ret)) {
                unsigned long last_byte = addr + size - 1;
                s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

                if (unlikely(ret != (unsigned long)last_shadow ||
                        ((long)(last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
                        return true;
        }
        return false;
}

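/*
 * Dispatch to the fixed-size fast paths when the access size is a
 * compile-time constant; fall back to the generic scan otherwise.
 */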
static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
        if (__builtin_constant_p(size)) {
                switch (size) {
                case 1:
                        return memory_is_poisoned_1(addr);
                case 2:
                case 4:
                case 8:
                        return memory_is_poisoned_2_4_8(addr, size);
                case 16:
                        return memory_is_poisoned_16(addr);
                default:
                        BUILD_BUG();
                }
        }

        return memory_is_poisoned_n(addr, size);
}

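/*
 * Core check: returns true if the access is valid (or checking is not yet
 * possible), false if a report was produced. Zero-size accesses are ignored;
 * wrapping ranges and addresses below the shadow-covered region are reported
 * as invalid.
 */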
static __always_inline bool check_region_inline(unsigned long addr,
                                                size_t size, bool write,
                                                unsigned long ret_ip)
{
        if (!kasan_arch_is_ready())
                return true;

        if (unlikely(size == 0))
                return true;

        if (unlikely(addr + size < addr))
                return !kasan_report(addr, size, write, ret_ip);

        if (unlikely((void *)addr <
                kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
                return !kasan_report(addr, size, write, ret_ip);
        }

        if (likely(!memory_is_poisoned(addr, size)))
                return true;

        return !kasan_report(addr, size, write, ret_ip);
}

bool kasan_check_range(unsigned long addr, size_t size, bool write,
                                        unsigned long ret_ip)
{
        return check_region_inline(addr, size, write, ret_ip);
}

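/*
 * A shadow value in [0, KASAN_GRANULE_SIZE) means the granule containing
 * addr is fully or partially accessible; any other value marks it poisoned.
 */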
bool kasan_byte_accessible(const void *addr)
{
        s8 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

        return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}

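/*
 * Shrinking or destroying a cache must flush its quarantined objects, so that
 * no deferred-free objects belonging to the cache remain afterwards.
 */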
void kasan_cache_shrink(struct kmem_cache *cache)
{
        kasan_quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
        if (!__kmem_cache_empty(cache))
                kasan_quarantine_remove_cache(cache);
}

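/*
 * Unpoison the global variable itself and poison the redzone that the
 * compiler placed after it (size_with_redzone covers both).
 */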
static void register_global(struct kasan_global *global)
{
        size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);

        kasan_unpoison(global->beg, global->size, false);

        kasan_poison(global->beg + aligned_size,
                     global->size_with_redzone - aligned_size,
                     KASAN_GLOBAL_REDZONE, false);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
        int i;

        for (i = 0; i < size; i++)
                register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

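/*
 * __asan_loadX()/__asan_storeX() are the instrumentation callbacks that the
 * compiler inserts before every memory access of size X. The _noabort
 * variants are plain aliases: KASAN reports do not abort the kernel, so both
 * entry points behave identically.
 */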
#define DEFINE_ASAN_LOAD_STORE(size)                                    \
        void __asan_load##size(unsigned long addr)                      \
        {                                                               \
                check_region_inline(addr, size, false, _RET_IP_);       \
        }                                                               \
        EXPORT_SYMBOL(__asan_load##size);                               \
        __alias(__asan_load##size)                                      \
        void __asan_load##size##_noabort(unsigned long);                \
        EXPORT_SYMBOL(__asan_load##size##_noabort);                     \
        void __asan_store##size(unsigned long addr)                     \
        {                                                               \
                check_region_inline(addr, size, true, _RET_IP_);        \
        }                                                               \
        EXPORT_SYMBOL(__asan_store##size);                              \
        __alias(__asan_store##size)                                     \
        void __asan_store##size##_noabort(unsigned long);               \
        EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

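/* __asan_loadN()/__asan_storeN() handle accesses of a non-constant size. */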
void __asan_loadN(unsigned long addr, size_t size)
{
        kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
        kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison alloca()ed objects. */
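/*
 * Unpoison the trailing, partially covered granule of the object, then poison
 * a left redzone below it and a right redzone (padding included) above it.
 */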
void __asan_alloca_poison(unsigned long addr, size_t size)
{
        size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
        size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
                        rounded_up_size;
        size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);

        const void *left_redzone = (const void *)(addr -
                        KASAN_ALLOCA_REDZONE_SIZE);
        const void *right_redzone = (const void *)(addr + rounded_up_size);

        WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

        kasan_unpoison((const void *)(addr + rounded_down_size),
                        size - rounded_down_size, false);
        kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
                     KASAN_ALLOCA_LEFT, false);
        kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
                     KASAN_ALLOCA_RIGHT, false);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
        if (unlikely(!stack_top || stack_top > stack_bottom))
                return;

        kasan_unpoison(stack_top, stack_bottom - stack_top, false);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
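/*
 * For stack variables the compiler computes the shadow addresses itself and
 * calls __asan_set_shadow_XX() to fill the shadow with the value 0xXX
 * directly.
 */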
#define DEFINE_ASAN_SET_SHADOW(byte) \
        void __asan_set_shadow_##byte(const void *addr, size_t size)    \
        {                                                               \
                __memset((void *)addr, 0x##byte, size);                 \
        }                                                               \
        EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

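/*
 * Record an auxiliary call stack (e.g. the call_rcu() or queue_work() call
 * site) in the object's alloc metadata, so it can be shown in reports.
 */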
void kasan_record_aux_stack(void *addr)
{
        struct page *page = kasan_addr_to_page(addr);
        struct kmem_cache *cache;
        struct kasan_alloc_meta *alloc_meta;
        void *object;

        if (is_kfence_address(addr) || !(page && PageSlab(page)))
                return;

        cache = page->slab_cache;
        object = nearest_obj(cache, page, addr);
        alloc_meta = kasan_get_alloc_meta(cache, object);
        if (!alloc_meta)
                return;

        alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
        alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
}

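/*
 * Save the stack trace of the free in the object's free metadata and mark the
 * object's first shadow byte as KASAN_KMALLOC_FREETRACK so that
 * kasan_get_free_track() below knows the track is valid.
 */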
void kasan_set_free_info(struct kmem_cache *cache,
                                void *object, u8 tag)
{
        struct kasan_free_meta *free_meta;

        free_meta = kasan_get_free_meta(cache, object);
        if (!free_meta)
                return;

        kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
        /* The object was freed and has free track set. */
        *(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREETRACK;
}

struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
                                void *object, u8 tag)
{
        if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_KMALLOC_FREETRACK)
                return NULL;
        /* Free meta must be present with KASAN_KMALLOC_FREETRACK. */
        return &kasan_get_free_meta(cache, object)->free_track;
}