linux/arch/x86/mm/kasan_init_64.c
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];
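
/*
 * Map the shadow for one range of direct-mapped physical memory with real
 * (allocated) shadow pages. KASAN keeps one shadow byte for every 8 bytes
 * of address space, so the shadow of a range is 1/8th of its size.
 */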
static int __init map_range(struct range *range)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
        end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

        /*
         * end + 1 here is intentional. We check several shadow bytes in
         * advance to slightly speed up the fast path. In some rare cases we
         * could cross the boundary of the mapped shadow, so we just map some
         * more here.
         */
        return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}
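
/*
 * Remove the early shadow mapping: clear every PGD entry covering the
 * shadow region so it can be repopulated with the real layout.
 */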
static void __init clear_pgds(unsigned long start,
                        unsigned long end)
{
        for (; start < end; start += PGDIR_SIZE)
                pgd_clear(pgd_offset_k(start));
}
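
/*
 * Point each PGD entry covering the shadow region at the shared zero PUD,
 * so that every early shadow lookup resolves to kasan_zero_page.
 */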
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
        int i;
        unsigned long start = KASAN_SHADOW_START;
        unsigned long end = KASAN_SHADOW_END;

        for (i = pgd_index(start); start < end; i++) {
                pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud)
                                | _KERNPG_TABLE);
                start += PGDIR_SIZE;
        }
}
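
/*
 * With inline instrumentation a wild memory access dereferences a bogus
 * shadow address and surfaces as a general protection fault, so hook the
 * die notifier to name the likely causes in the report.
 */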
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
                             unsigned long val,
                             void *data)
{
        if (val == DIE_GPF) {
                pr_emerg("CONFIG_KASAN_INLINE enabled\n");
                pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
        }
        return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
        .notifier_call = kasan_die_handler,
};
#endif
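
/*
 * Build the early shadow: fill the zero PTE/PMD/PUD tables so each level
 * points down to kasan_zero_page, then hang that hierarchy off both the
 * early and the final kernel page tables. Every shadow access hits this
 * single page until kasan_init() installs the real layout.
 */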
void __init kasan_early_init(void)
{
        int i;
        pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
        pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
        pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;

        for (i = 0; i < PTRS_PER_PTE; i++)
                kasan_zero_pte[i] = __pte(pte_val);

        for (i = 0; i < PTRS_PER_PMD; i++)
                kasan_zero_pmd[i] = __pmd(pmd_val);

        for (i = 0; i < PTRS_PER_PUD; i++)
                kasan_zero_pud[i] = __pud(pud_val);

        kasan_map_early_shadow(early_level4_pgt);
        kasan_map_early_shadow(init_level4_pgt);
}
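
/*
 * Replace the early shadow with the real layout:
 *
 *   [KASAN_SHADOW_START, shadow(PAGE_OFFSET))         zero shadow
 *   shadow of the direct mapping (pfn_mapped[])       allocated pages
 *   [shadow(PAGE_OFFSET + MAXMEM), shadow(kernel))    zero shadow
 *   shadow of the kernel image (_stext.._end)         allocated pages
 *   [shadow(MODULES_END), KASAN_SHADOW_END)           zero shadow
 */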
void __init kasan_init(void)
{
        int i;

#ifdef CONFIG_KASAN_INLINE
        register_die_notifier(&kasan_die_notifier);
#endif
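
        /*
         * Run on a scratch copy of the page tables so the CPU keeps a valid
         * shadow mapping while the shadow entries of init_level4_pgt are
         * torn down and rebuilt.
         */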
        memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
        load_cr3(early_level4_pgt);
        __flush_tlb_all();

        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
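
        /*
         * Nothing below PAGE_OFFSET is instrumented, so the shared zero
         * page is enough for its shadow.
         */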
        kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
                        kasan_mem_to_shadow((void *)PAGE_OFFSET));
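
        /*
         * Allocate real shadow for all direct-mapped physical memory; the
         * pfn_mapped[] array ends at the first entry with .end == 0.
         */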
        for (i = 0; i < E820_X_MAX; i++) {
                if (pfn_mapped[i].end == 0)
                        break;

                if (map_range(&pfn_mapped[i]))
                        panic("kasan: unable to allocate shadow!");
        }
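
        /*
         * The region between the direct mapping and the kernel image
         * (vmalloc, vmemmap, ...) gets zero shadow: accesses there always
         * read as unpoisoned.
         */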
        kasan_populate_zero_shadow(
                kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                kasan_mem_to_shadow((void *)__START_KERNEL_map));
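
        /* The kernel image is instrumented, so give it real shadow pages. */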
        vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
                        (unsigned long)kasan_mem_to_shadow(_end),
                        NUMA_NO_NODE);
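
        /*
         * Everything from MODULES_END up gets zero shadow; the module
         * area's shadow is populated separately, at module load time.
         */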
        kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                        (void *)KASAN_SHADOW_END);
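
        /* The shadow is fully set up: switch back to the real page tables. */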
        load_cr3(init_level4_pgt);
        __flush_tlb_all();

        /*
         * kasan_zero_page has been used as early shadow memory, thus it may
         * contain some garbage. Now we can clear and write protect it, since
         * after the TLB flush no one should write to it.
         */
        memset(kasan_zero_page, 0, PAGE_SIZE);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
                set_pte(&kasan_zero_pte[i], pte);
        }
        /* Flush TLBs again to be sure that the write protection is applied. */
        __flush_tlb_all();
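
        /*
         * kasan_depth was non-zero during early boot to suppress reports
         * before the shadow existed; zeroing it enables error reporting.
         */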
        init_task.kasan_depth = 0;
        pr_info("KernelAddressSanitizer initialized\n");
}