linux/arch/arm64/kernel/kaslr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/types.h>

#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/sections.h>

u64 __ro_after_init module_alloc_base;
u16 __initdata memstart_offset_seed;

static __init u64 get_kaslr_seed(void *fdt)
{
        int node, len;
        fdt64_t *prop;
        u64 ret;

        node = fdt_path_offset(fdt, "/chosen");
        if (node < 0)
                return 0;

        prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
        if (!prop || len != sizeof(u64))
                return 0;

        ret = fdt64_to_cpu(*prop);
        *prop = 0;
        return ret;
}
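
/*
 * Illustrative sketch (values hypothetical): firmware or the bootloader is
 * expected to supply the seed as a 64-bit /chosen property, e.g.
 *
 *      /chosen {
 *              kaslr-seed = <0x12345678 0x9abcdef0>;
 *      };
 *
 * get_kaslr_seed() zeroes the property after reading it, so the seed is not
 * left visible to user space via /proc/device-tree later on.
 */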

static __init const u8 *kaslr_get_cmdline(void *fdt)
{
        static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;

        if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
                int node;
                const u8 *prop;

                node = fdt_path_offset(fdt, "/chosen");
                if (node < 0)
                        goto out;

                prop = fdt_getprop(fdt, node, "bootargs", NULL);
                if (!prop)
                        goto out;
                return prop;
        }
out:
        return default_cmdline;
}
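
/*
 * Note on the logic above: with CONFIG_CMDLINE_FORCE=y the DT bootargs are
 * ignored and CONFIG_CMDLINE is always used; otherwise CONFIG_CMDLINE serves
 * only as a fallback when /chosen or its "bootargs" property is absent.
 */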

/*
 * This routine will be executed with the kernel mapped at its default virtual
 * address, and if it returns successfully, the kernel will be remapped, and
 * start_kernel() will be executed from a randomized virtual offset. The
 * relocation will cause all absolute references (e.g., static variables
 * containing function pointers) to be reinitialized, and zero-initialized
 * .bss variables will be reset to 0.
 */
u64 __init kaslr_early_init(u64 dt_phys)
{
        void *fdt;
        u64 seed, offset, mask, module_range;
        const u8 *cmdline, *str;
        int size;

        /*
         * Set a reasonable default for module_alloc_base in case
         * we end up running with module randomization disabled.
         */
        module_alloc_base = (u64)_etext - MODULES_VSIZE;
        __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
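
        /*
         * With this default, the module region is [_etext - MODULES_VSIZE,
         * _etext): it still covers the kernel text, so modules loaded there
         * remain within relative branch range even without randomization.
         */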

        /*
         * Try to map the FDT early. If this fails, we simply bail,
         * and proceed with KASLR disabled. We will make another
         * attempt at mapping the FDT in setup_machine().
         */
        early_fixmap_init();
        fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
        if (!fdt)
                return 0;

        /*
         * Retrieve (and wipe) the seed from the FDT.
         */
        seed = get_kaslr_seed(fdt);
        if (!seed)
                return 0;

        /*
         * Check whether 'nokaslr' appears on the command line, either at
         * the start or preceded by a space, and return 0 if that is the
         * case.
         */
        cmdline = kaslr_get_cmdline(fdt);
        str = strstr(cmdline, "nokaslr");
        if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
                return 0;
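
        /*
         * For example, "console=ttyAMA0 nokaslr" disables KASLR, while
         * "foo=barnokaslr" does not. Only the left-hand word boundary is
         * checked, so "nokaslrsomething" at the start would match as well.
         */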

        /*
         * OK, so we are proceeding with KASLR enabled. Calculate a suitable
         * kernel image offset from the seed. Let's place the kernel in the
         * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of
         * the lower and upper quarters to avoid colliding with other
         * allocations.
         * Even if we could randomize at page granularity for 16k and 64k pages,
         * let's always round to 2 MB so we don't interfere with the ability to
         * map using contiguous PTEs.
         */
        mask = ((1UL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
        offset = BIT(VA_BITS_MIN - 3) + (seed & mask);
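
        /*
         * Worked example, assuming VA_BITS_MIN == 48:
         *   mask   = ((1UL << 46) - 1) & ~(SZ_2M - 1)  -> seed bits [45:21]
         *   offset = BIT(45) + (seed & mask)
         * so offset is 2 MB aligned and lies in [2^45, 3 * 2^45), the middle
         * half of a 2^47-byte window, clear of its lower and upper quarters.
         */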

        /* use the top 16 bits to randomize the linear region */
        memstart_offset_seed = seed >> 48;

        if (IS_ENABLED(CONFIG_KASAN))
                /*
                 * KASAN does not expect the module region to intersect the
                 * vmalloc region, since shadow memory is allocated for each
                 * module at load time, whereas the vmalloc region is shadowed
                 * by KASAN zero pages. So keep modules out of the vmalloc
                 * region if KASAN is enabled, and put the kernel well within
                 * 4 GB of the module region.
                 */
                return offset % SZ_2G;
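
        /*
         * Reducing offset mod 2 GB means the relocated image lands at most
         * 2 GB above its default location, which (plus the image size) keeps
         * it within the 4 GB reach of the default module region set up above.
         */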

        if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
                /*
                 * Randomize the module region over a 2 GB window covering the
                 * kernel. This reduces the risk of modules leaking information
                 * about the address of the kernel itself, but results in
                 * branches between modules and the core kernel that are
                 * resolved via PLTs. (Branches between modules will be
                 * resolved normally.)
                 */
                module_range = SZ_2G - (u64)(_end - _stext);
                module_alloc_base = max((u64)_end + offset - SZ_2G,
                                        (u64)MODULES_VADDR);
        } else {
                /*
                 * Randomize the module region by setting module_alloc_base to
                 * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
                 * _stext). This guarantees that the resulting region still
                 * covers [_stext, _etext], and that all relative branches can
                 * be resolved without veneers.
                 */
                module_range = MODULES_VSIZE - (u64)(_etext - _stext);
                module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
        }
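
        /*
         * In both branches, offset is folded in because _etext/_end still
         * evaluate to their default, pre-relocation addresses at this point;
         * the bases are really being positioned relative to where the kernel
         * will sit after relocation. The randomization step below then slides
         * module_alloc_base up by at most module_range, which in the non-FULL
         * case lands it in [_etext - MODULES_VSIZE, _stext) as stated above.
         */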

        /* use the lower 21 bits to randomize the base of the module region */
        module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
        module_alloc_base &= PAGE_MASK;
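
        /*
         * The multiply-and-shift scales the 21-bit random value r to
         * module_range * r / 2^21, i.e. into [0, module_range). Note how the
         * seed is partitioned (for VA_BITS_MIN == 48): bits [63:48] randomize
         * the linear region, bits [45:21] the image offset, and bits [20:0]
         * the module base, so the three choices draw on disjoint bits.
         */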

        __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
        __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));

        return offset;
}