linux/arch/x86/mm/kasan_init_64.c
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_MAX_ENTRIES];

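/*
 * Map one range of already-mapped physical memory to real shadow memory.
 * kasan_mem_to_shadow() translates an address to its shadow location
 * (on x86-64: shadow = (addr >> 3) + KASAN_SHADOW_OFFSET, so one shadow
 * byte covers an 8-byte granule); vmemmap_populate() then backs that
 * shadow range with actual pages.
 */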
static int __init map_range(struct range *range)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
        end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

        return vmemmap_populate(start, end, NUMA_NO_NODE);
}

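/*
 * Remove the early zero-shadow mappings for the whole shadow region,
 * one top-level entry at a time, so that kasan_init() can repopulate
 * the region with real shadow memory.
 */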
static void __init clear_pgds(unsigned long start,
                        unsigned long end)
{
        pgd_t *pgd;

        for (; start < end; start += PGDIR_SIZE) {
                pgd = pgd_offset_k(start);
                /*
                 * With folded p4d, pgd_clear() is a no-op, use p4d_clear()
                 * instead.
                 */
                if (CONFIG_PGTABLE_LEVELS < 5)
                        p4d_clear(p4d_offset(pgd, start));
                else
                        pgd_clear(pgd);
        }
}

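/*
 * Point every top-level entry covering [KASAN_SHADOW_START, KASAN_SHADOW_END)
 * at the shared zero pud (or zero p4d with 5-level paging), so that during
 * early boot every shadow access resolves to kasan_zero_page.
 */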
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
        int i;
        unsigned long start = KASAN_SHADOW_START;
        unsigned long end = KASAN_SHADOW_END;

        for (i = pgd_index(start); start < end; i++) {
                switch (CONFIG_PGTABLE_LEVELS) {
                case 4:
                        pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) |
                                        _KERNPG_TABLE);
                        break;
                case 5:
                        pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) |
                                        _KERNPG_TABLE);
                        break;
                default:
                        BUILD_BUG();
                }
                start += PGDIR_SIZE;
        }
}

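/*
 * With inline instrumentation the compiler emits the shadow checks directly,
 * so a general protection fault may really be caused by reading the shadow
 * of a bogus pointer (a NULL dereference or a user memory access). Hook the
 * die notifier to print that hint alongside the GPF report.
 */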
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
                             unsigned long val,
                             void *data)
{
        if (val == DIE_GPF) {
                pr_emerg("CONFIG_KASAN_INLINE enabled\n");
                pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
        }
        return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
        .notifier_call = kasan_die_handler,
};
#endif

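/*
 * Build the early zero shadow: every entry at each page-table level points
 * at the single zero table below it, and the leaf PTEs all map
 * kasan_zero_page, so any shadow read during early boot returns zero. The
 * resulting tables are hooked into both early_top_pgt and init_top_pgt via
 * kasan_map_early_shadow().
 */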
void __init kasan_early_init(void)
{
        int i;
        pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
        pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
        pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
        p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

        for (i = 0; i < PTRS_PER_PTE; i++)
                kasan_zero_pte[i] = __pte(pte_val);

        for (i = 0; i < PTRS_PER_PMD; i++)
                kasan_zero_pmd[i] = __pmd(pmd_val);

        for (i = 0; i < PTRS_PER_PUD; i++)
                kasan_zero_pud[i] = __pud(pud_val);

        for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
                kasan_zero_p4d[i] = __p4d(p4d_val);

        kasan_map_early_shadow(early_top_pgt);
        kasan_map_early_shadow(init_top_pgt);
}

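/*
 * Replace the early zero shadow with the real one: run temporarily on a copy
 * of the top-level table, clear the early shadow entries, populate real
 * shadow for the direct mapping and the kernel image, map the rest of the
 * shadow region to the zero page, then switch back, write-protect
 * kasan_zero_page and enable reporting.
 */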
void __init kasan_init(void)
{
        int i;

#ifdef CONFIG_KASAN_INLINE
        register_die_notifier(&kasan_die_notifier);
#endif

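        /*
         * Copy the current top-level table into early_top_pgt and run on
         * the copy while init_top_pgt's shadow entries are torn down and
         * rebuilt below, so instrumented accesses made during the rebuild
         * still hit valid (early zero) shadow.
         */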
        memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
        load_cr3(early_top_pgt);
        __flush_tlb_all();

        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

        kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
                        kasan_mem_to_shadow((void *)PAGE_OFFSET));

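        /*
         * Allocate real shadow only for physical ranges that are actually
         * present in the direct mapping (recorded in pfn_mapped); KASAN
         * cannot operate without shadow, so failure here is fatal.
         */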
        for (i = 0; i < E820_MAX_ENTRIES; i++) {
                if (pfn_mapped[i].end == 0)
                        break;

                if (map_range(&pfn_mapped[i]))
                        panic("kasan: unable to allocate shadow!");
        }
        kasan_populate_zero_shadow(
                kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                kasan_mem_to_shadow((void *)__START_KERNEL_map));

        vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
                        (unsigned long)kasan_mem_to_shadow(_end),
                        NUMA_NO_NODE);

        kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                        (void *)KASAN_SHADOW_END);

        load_cr3(init_top_pgt);
        __flush_tlb_all();

        /*
         * kasan_zero_page has been used as early shadow memory, thus it may
         * contain some garbage. Now we can clear and write protect it, since
         * after the TLB flush no one should write to it.
         */
        memset(kasan_zero_page, 0, PAGE_SIZE);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
                set_pte(&kasan_zero_pte[i], pte);
        }
        /* Flush TLBs again to be sure that write protection is applied. */
        __flush_tlb_all();

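        /*
         * init_task starts with a non-zero kasan_depth so reports are
         * suppressed while the shadow is still being set up; clearing it
         * here turns error reporting on.
         */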
        init_task.kasan_depth = 0;
        pr_info("KernelAddressSanitizer initialized\n");
}