linux/arch/powerpc/mm/kasan/8xx.c
// SPDX-License-Identifier: GPL-2.0

#define DISABLE_BRANCH_PROFILING

#include <linux/kasan.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>

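/*
 * Populate the KASAN shadow for the range [k_start, k_end) with 8M huge
 * pages backed by the physically contiguous memory at @block.
 */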
static int __init
kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block)
{
        pmd_t *pmd = pmd_off_k(k_start);
        unsigned long k_cur, k_next;

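        /*
         * An 8M huge mapping spans two consecutive PGD entries, hence the
         * double pgd_addr_end() step and pmd += 2 per iteration. Each
         * iteration consumes the next 8M of the backing block.
         */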
        for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd += 2, block += SZ_8M) {
                pte_basic_t *new;

                k_next = pgd_addr_end(k_cur, k_end);
                k_next = pgd_addr_end(k_next, k_end);
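                /* Only replace entries still pointing at the early shadow. */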
                if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
                        continue;

                new = memblock_alloc(sizeof(pte_basic_t), SZ_4K);
                if (!new)
                        return -ENOMEM;

                *new = pte_val(pte_mkhuge(pfn_pte(PHYS_PFN(__pa(block)), PAGE_KERNEL)));

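                /* Install the same huge PTE in both PGD entries it spans. */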
                hugepd_populate_kernel((hugepd_t *)pmd, (pte_t *)new, PAGE_SHIFT_8M);
                hugepd_populate_kernel((hugepd_t *)pmd + 1, (pte_t *)new, PAGE_SHIFT_8M);
        }
        return 0;
}

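/*
 * Allocate backing memory for the shadow of [start, start + size) and map
 * it, using 8M huge pages for the 8M-aligned part of the shadow and
 * smaller mappings for the remainder.
 */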
int __init kasan_init_region(void *start, size_t size)
{
        unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
        unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
        unsigned long k_cur;
        int ret;
        void *block;

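        /* One contiguous, 8M-aligned block backs the whole shadow range. */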
        block = memblock_alloc(k_end - k_start, SZ_8M);
        if (!block)
                return -ENOMEM;

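        /* Cover as much as possible with 8M huge pages first. */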
        if (IS_ALIGNED(k_start, SZ_8M)) {
                kasan_init_shadow_8M(k_start, ALIGN_DOWN(k_end, SZ_8M), block);
                k_cur = ALIGN_DOWN(k_end, SZ_8M);
                if (k_cur == k_end)
                        goto finish;
        } else {
                k_cur = k_start;
        }

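        /* Allocate page tables for the part of the shadow not yet covered. */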
        ret = kasan_init_shadow_page_tables(k_start, k_end);
        if (ret)
                return ret;

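        /*
         * Map the remainder page by page, marking PTEs huge (512K) while
         * the address stays below the last 512K-aligned boundary.
         */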
        for (; k_cur < k_end; k_cur += PAGE_SIZE) {
                pmd_t *pmd = pmd_off_k(k_cur);
                void *va = block + k_cur - k_start;
                pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);

                if (k_cur < ALIGN_DOWN(k_end, SZ_512K))
                        pte = pte_mkhuge(pte);

                __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
        }
finish:
        flush_tlb_kernel_range(k_start, k_end);
        return 0;
}