linux/include/linux/kasan.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

/* kasan_data struct is used in KUnit tests for KASAN expected failures */
struct kunit_kasan_expectation {
        bool report_found;
};

#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
                                const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
        return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                + KASAN_SHADOW_OFFSET;
}
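
/*
 * Example (illustrative, not part of this header): in the generic mode
 * KASAN_SHADOW_SCALE_SHIFT is 3, so one shadow byte covers an 8-byte
 * granule and the mapping above computes
 *
 *     shadow = (addr >> 3) + KASAN_SHADOW_OFFSET
 *
 * i.e. addresses 8 bytes apart land in adjacent shadow bytes. The actual
 * scale shift and offset come from <asm/kasan.h> and differ per
 * architecture and mode (the software tag-based mode uses a shift of 4).
 */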

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
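
/*
 * Illustrative usage (a sketch, not a declaration in this header): these
 * calls nest via a per-task counter, so code that must legitimately touch
 * memory KASAN would report on can bracket the access:
 *
 *     kasan_disable_current();
 *     ... access that would otherwise produce a report ...
 *     kasan_enable_current();
 */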

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
        return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
                                        unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
        return static_branch_likely(&kasan_flag_enabled);
}

static inline bool kasan_has_integrated_init(void)
{
        return kasan_enabled();
}

void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
void kasan_free_pages(struct page *page, unsigned int order);

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
        return IS_ENABLED(CONFIG_KASAN);
}

static inline bool kasan_has_integrated_init(void)
{
        return false;
}

static __always_inline void kasan_alloc_pages(struct page *page,
                                              unsigned int order, gfp_t flags)
{
        /* Only available for integrated init. */
        BUILD_BUG();
}

static __always_inline void kasan_free_pages(struct page *page,
                                             unsigned int order)
{
        /* Only available for integrated init. */
        BUILD_BUG();
}

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN

struct kasan_cache {
        int alloc_meta_offset;
        int free_meta_offset;
        bool is_kmalloc;
};

slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
        if (kasan_enabled())
                return __kasan_never_merge();
        return 0;
}

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
        if (kasan_enabled())
                __kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
                                                unsigned int order, bool init)
{
        if (kasan_enabled())
                __kasan_poison_pages(page, order, init);
}

void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_unpoison_pages(struct page *page,
                                                 unsigned int order, bool init)
{
        if (kasan_enabled())
                __kasan_unpoison_pages(page, order, init);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                                slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
                                unsigned int *size, slab_flags_t *flags)
{
        if (kasan_enabled())
                __kasan_cache_create(cache, size, flags);
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
        if (kasan_enabled())
                __kasan_cache_create_kmalloc(cache);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
        if (kasan_enabled())
                return __kasan_metadata_size(cache);
        return 0;
}

void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
{
        if (kasan_enabled())
                __kasan_poison_slab(page);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
                                                        void *object)
{
        if (kasan_enabled())
                __kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
                                                        void *object)
{
        if (kasan_enabled())
                __kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
                                          const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
                                struct kmem_cache *cache, const void *object)
{
        if (kasan_enabled())
                return __kasan_init_slab_obj(cache, object);
        return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
                        unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
                                                void *object, bool init)
{
        if (kasan_enabled())
                return __kasan_slab_free(s, object, _RET_IP_, init);
        return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
        if (kasan_enabled())
                __kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
        if (kasan_enabled())
                __kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
                                       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
                struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
        if (kasan_enabled())
                return __kasan_slab_alloc(s, object, flags, init);
        return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
                                    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
                                const void *object, size_t size, gfp_t flags)
{
        if (kasan_enabled())
                return __kasan_kmalloc(s, object, size, flags);
        return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
                                          size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
                                                      size_t size, gfp_t flags)
{
        if (kasan_enabled())
                return __kasan_kmalloc_large(ptr, size, flags);
        return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
                                     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
                                                 size_t new_size, gfp_t flags)
{
        if (kasan_enabled())
                return __kasan_krealloc(object, new_size, flags);
        return (void *)object;
}
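
/*
 * Illustrative call order (an assumption about a typical slab allocator,
 * not a contract stated in this header): allocation paths are expected to
 * run roughly
 *
 *     object = kasan_slab_alloc(s, object, flags, init);
 *     object = kasan_kmalloc(s, object, size, flags);    <- kmalloc() only
 *
 * and free paths call kasan_slab_free() first, skipping the actual free
 * when it returns true (e.g. the object was put into quarantine).
 */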

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
        if (kasan_enabled())
                return __kasan_check_byte(addr, _RET_IP_);
        return true;
}
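
/*
 * Illustrative sketch (an assumption about a caller such as ksize(), not
 * mandated here): validate the first byte before unpoisoning an object,
 * bailing out if it is not accessible:
 *
 *     if (unlikely(!kasan_check_byte(object)))
 *             return 0;
 */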

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline slab_flags_t kasan_never_merge(void)
{
        return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
                                      bool init) {}
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
                                        bool init) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
                                      unsigned int *size,
                                      slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
                                        void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
                                        void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
                                const void *object)
{
        return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
        return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
                                   gfp_t flags, bool init)
{
        return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
                                size_t size, gfp_t flags)
{
        return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
        return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
                                 gfp_t flags)
{
        return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
        return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
        return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
                bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
        return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
                           unsigned long free_region_start,
                           unsigned long free_region_end);
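
/*
 * Illustrative pairing (an assumption about the vmalloc caller, not
 * defined in this header): shadow is populated before an area is handed
 * out and released once the backing region is freed, roughly
 *
 *     if (kasan_populate_vmalloc(addr, size))
 *             return -ENOMEM;
 *     kasan_unpoison_vmalloc((void *)addr, size);
 *     ...
 *     kasan_poison_vmalloc((void *)addr, size);
 *     kasan_release_vmalloc(start, end, free_region_start, free_region_end);
 */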

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
                                        unsigned long size)
{
        return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
                                         unsigned long end,
                                         unsigned long free_region_start,
                                         unsigned long free_region_end) {}

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
                !defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);
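
/*
 * Illustrative sketch (an assumption about an architecture's
 * module_alloc(), not part of this header): real shadow is allocated
 * right after the module area itself, and the area is dropped when that
 * fails:
 *
 *     p = __vmalloc_node_range(...);
 *     if (p && kasan_module_alloc(p, size)) {
 *             vfree(p);
 *             return NULL;
 *     }
 */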

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */