linux/arch/x86/realmode/init.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>

#include <asm/set_memory.h>
#include <asm/pgtable.h>
#include <asm/realmode.h>
#include <asm/tlbflush.h>
#include <asm/crash.h>

struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;

/* Holds the pgd entry used on booting additional CPUs */
pgd_t trampoline_pgd_entry;

void __init reserve_real_mode(void)
{
        phys_addr_t mem;
        size_t size = real_mode_size_needed();

        if (!size)
                return;

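        /* Sanity check: this must run early, before the slab allocator is available. */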
        WARN_ON(slab_is_available());

        /* Has to be under 1M so we can execute real-mode AP code. */
        mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
        if (!mem) {
                pr_info("No sub-1M memory is available for the trampoline\n");
                return;
        }

        memblock_reserve(mem, size);
        set_real_mode_mem(mem);
        crash_reserve_low_1M();
}

static void __init setup_real_mode(void)
{
        u16 real_mode_seg;
        const u32 *rel;
        u32 count;
        unsigned char *base;
        unsigned long phys_base;
        struct trampoline_header *trampoline_header;
        size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
        u64 *trampoline_pgd;
        u64 efer;
#endif

        base = (unsigned char *)real_mode_header;

        /*
         * If SME is active, the trampoline area will need to be in
         * decrypted memory in order to bring up other processors
         * successfully. This is not needed for SEV.
         */
        if (sme_active())
                set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);

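        /* Copy the real-mode blob into the reserved sub-1M area. */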
        memcpy(base, real_mode_blob, size);

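        /* Real-mode segment:offset addressing uses segment * 16, hence the shift by 4. */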
        phys_base = __pa(base);
        real_mode_seg = phys_base >> 4;

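        /*
         * real_mode_relocs holds two tables, each a count followed by
         * that many offsets into the blob: one for 16-bit segment
         * fixups, one for 32-bit linear fixups.
         */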
        rel = (u32 *) real_mode_relocs;

        /* 16-bit segment relocations. */
        count = *rel++;
        while (count--) {
                u16 *seg = (u16 *) (base + *rel++);
                *seg = real_mode_seg;
        }

        /* 32-bit linear relocations. */
        count = *rel++;
        while (count--) {
                u32 *ptr = (u32 *) (base + *rel++);
                *ptr += phys_base;
        }

        /* Must be performed *after* relocation. */
        trampoline_header = (struct trampoline_header *)
                __va(real_mode_header->trampoline_header);

#ifdef CONFIG_X86_32
        trampoline_header->start = __pa_symbol(startup_32_smp);
        trampoline_header->gdt_limit = __BOOT_DS + 7;
        trampoline_header->gdt_base = __pa_symbol(boot_gdt);
#else
        /*
         * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
         * so we need to mask it out.
         */
        rdmsrl(MSR_EFER, efer);
        trampoline_header->efer = efer & ~EFER_LMA;

        trampoline_header->start = (u64) secondary_startup_64;
        trampoline_cr4_features = &trampoline_header->cr4;
        *trampoline_cr4_features = mmu_cr4_features;

        trampoline_header->flags = 0;
        if (sme_active())
                trampoline_header->flags |= TH_FLAGS_SME_ACTIVE;

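        /*
         * PGD entry 0 maps the low memory the trampoline executes from;
         * entry 511 mirrors the kernel's topmost entry so the kernel
         * mapping is reachable once the AP enables paging.
         */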
        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
        trampoline_pgd[0] = trampoline_pgd_entry.pgd;
        trampoline_pgd[511] = init_top_pgt[511].pgd;
#endif
}

/*
 * reserve_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. Also, the trampoline code will be executed by APs, so it
 * must be marked executable no later than do_pre_smp_initcalls();
 * thus run this as an early_initcall().
 */
static void __init set_real_mode_permissions(void)
{
        unsigned char *base = (unsigned char *) real_mode_header;
        size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

        size_t ro_size =
                PAGE_ALIGN(real_mode_header->ro_end) -
                __pa(base);

        size_t text_size =
                PAGE_ALIGN(real_mode_header->ro_end) -
                real_mode_header->text_start;

        unsigned long text_start =
                (unsigned long) __va(real_mode_header->text_start);

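        /*
         * Make the whole blob non-executable, everything up to ro_end
         * read-only, then re-enable execute on the text region only.
         */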
        set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
        set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
        set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
}

static int __init init_real_mode(void)
{
        if (!real_mode_header)
                panic("Real mode trampoline was not allocated");

        setup_real_mode();
        set_real_mode_permissions();

        return 0;
}
early_initcall(init_real_mode);