linux/arch/x86/boot/compressed/ident_map_64.c
// SPDX-License-Identifier: GPL-2.0
/*
 * This code is used on x86_64 to create page table identity mappings on
 * demand by building up a new set of page tables (or appending to the
 * existing ones), and then switching over to them when ready.
 *
 * Copyright (C) 2015-2016  Yinghai Lu
 * Copyright (C)      2016  Kees Cook
 */

/*
 * Since we're dealing with identity mappings, physical and virtual
 * addresses are the same, so override these defines which are ultimately
 * used by the headers in misc.h.
 */
#define __pa(x)  ((unsigned long)(x))
#define __va(x)  ((void *)((unsigned long)(x)))

/* No PAGE_TABLE_ISOLATION support needed either: */
#undef CONFIG_PAGE_TABLE_ISOLATION

#include "error.h"
#include "misc.h"

/* These actually do the work of building the kernel identity maps. */
#include <asm/cmpxchg.h>
#include <asm/trap_pf.h>
#include <asm/trapnr.h>
#include <asm/init.h>
#include <asm/pgtable.h>
/* Use the static base for this part of the boot process */
#undef __PAGE_OFFSET
#define __PAGE_OFFSET __PAGE_OFFSET_BASE
#include "../../mm/ident_map.c"

#define _SETUP
#include <asm/setup.h>  /* For COMMAND_LINE_SIZE */
#undef _SETUP

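/* Provided by cmdline.c: returns the address of the kernel command line. */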
extern unsigned long get_cmd_line_ptr(void);

/* Used by PAGE_KERN* macros: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;

/* Used to track our page table allocation area. */
struct alloc_pgt_data {
        unsigned char *pgt_buf;         /* base of the allocation area */
        unsigned long pgt_buf_size;     /* total size of the area in bytes */
        unsigned long pgt_buf_offset;   /* bytes handed out so far */
};

/*
 * Allocates space for a page table entry, using struct alloc_pgt_data
 * above. Besides the local callers, this is used as the allocation
 * callback in mapping_info below.
 */
static void *alloc_pgt_page(void *context)
{
        struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context;
        unsigned char *entry;

        /* Validate there is space available for a new page. */
        if (pages->pgt_buf_offset >= pages->pgt_buf_size) {
                debug_putstr("out of pgt_buf in " __FILE__ "!?\n");
                debug_putaddr(pages->pgt_buf_offset);
                debug_putaddr(pages->pgt_buf_size);
                return NULL;
        }

        entry = pages->pgt_buf + pages->pgt_buf_offset;
        pages->pgt_buf_offset += PAGE_SIZE;

        return entry;
}

/* Used to track our allocated page tables. */
static struct alloc_pgt_data pgt_data;

/* The top level page table entry pointer. */
static unsigned long top_level_pgt;

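/*
 * Mask of valid physical address bits; initialize_identity_maps() below
 * clears the SME encryption bit from it.
 */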
phys_addr_t physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;

/*
 * Mapping information structure passed to kernel_ident_mapping_init().
 * Due to relocation, pointers must be assigned at run time, not build time.
 */
static struct x86_mapping_info mapping_info;

/*
 * Adds the specified range to the identity mappings.
 */
static void add_identity_map(unsigned long start, unsigned long end)
{
        int ret;

        /* Align boundary to 2M. */
        start = round_down(start, PMD_SIZE);
        end = round_up(end, PMD_SIZE);
        if (start >= end)
                return;

        /* Build the mapping. */
        ret = kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt, start, end);
        if (ret)
                error("Error: kernel_ident_mapping_init() failed\n");
}

/* Locates and clears a region for a new top level page table. */
void initialize_identity_maps(void *rmode)
{
        unsigned long cmdline;

        /* Exclude the encryption mask from __PHYSICAL_MASK */
        physical_mask &= ~sme_me_mask;

        /* Init mapping_info with run-time function/buffer pointers. */
        mapping_info.alloc_pgt_page = alloc_pgt_page;
        mapping_info.context = &pgt_data;
        mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sme_me_mask;
        mapping_info.kernpg_flag = _KERNPG_TABLE;

        /*
         * It should be impossible for this not to already be true,
         * but since calling this a second time would rewind the other
         * counters, let's just make sure this is reset too.
         */
        pgt_data.pgt_buf_offset = 0;

        /*
         * If we came here via startup_32(), cr3 will be _pgtable already
         * and we must append to the existing area instead of entirely
         * overwriting it.
         *
         * With 5-level paging, we use '_pgtable' to allocate the p4d page
         * table; the top-level page table is allocated separately.
         *
         * p4d_offset(top_level_pgt, 0) would cover both the 4- and 5-level
         * cases. On 4-level paging it's equal to 'top_level_pgt'.
         */
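        /*
         * Layout of the '_pgtable' buffer in the startup_32() case (sizes
         * per BOOT_INIT_PGT_SIZE and BOOT_PGT_SIZE):
         *
         *   _pgtable ... +BOOT_INIT_PGT_SIZE : tables built by startup_32()
         *            ... +BOOT_PGT_SIZE      : pgt_buf allocation area below
         */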
        top_level_pgt = read_cr3_pa();
        if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) {
                pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
                pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
                memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
        } else {
                pgt_data.pgt_buf = _pgtable;
                pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
                memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
                top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data);
        }

        /*
         * New page-table is set up - map the kernel image, boot_params and the
         * command line. The uncompressed kernel requires boot_params and the
         * command line to be mapped in the identity mapping. Map them
         * explicitly here in case the compressed kernel does not touch them,
         * or does not touch all the pages covering them.
         */
        add_identity_map((unsigned long)_head, (unsigned long)_end);
        boot_params = rmode;
        add_identity_map((unsigned long)boot_params, (unsigned long)(boot_params + 1));
        cmdline = get_cmd_line_ptr();
        add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);

        /* Load the new page-table. */
        sev_verify_cbit(top_level_pgt);
        write_cr3(top_level_pgt);
}

/*
 * This switches the page tables to the new level4 that has been built
 * via calls to add_identity_map() above. If booted via startup_32(),
 * this is effectively a no-op.
 */
void finalize_identity_maps(void)
{
        write_cr3(top_level_pgt);
}

static pte_t *split_large_pmd(struct x86_mapping_info *info,
                              pmd_t *pmdp, unsigned long __address)
{
        unsigned long page_flags;
        unsigned long address;
        pte_t *pte;
        pmd_t pmd;
        int i;

        pte = (pte_t *)info->alloc_pgt_page(info->context);
        if (!pte)
                return NULL;

        address     = __address & PMD_MASK;
        /* No large page - clear PSE flag */
        page_flags  = info->page_flag & ~_PAGE_PSE;

        /* Populate the PTEs */
        for (i = 0; i < PTRS_PER_PMD; i++) {
                set_pte(&pte[i], __pte(address | page_flags));
                address += PAGE_SIZE;
        }

        /*
         * Ideally we would clear the large PMD first and do a TLB
         * flush before we write the new PMD. But the 2M range of the
         * PMD might contain the code we execute and/or the stack
         * we are on, so we can't do that. That should be safe here
         * because we are going from large to small mappings and we are
         * also the only user of the page-table, so there is no chance
         * of a TLB multihit.
         */
        pmd = __pmd((unsigned long)pte | info->kernpg_flag);
        set_pmd(pmdp, pmd);
        /* Flush TLB to establish the new PMD */
        write_cr3(top_level_pgt);

        return pte + pte_index(__address);
}

static void clflush_page(unsigned long address)
{
        unsigned int flush_size;
        char *cl, *start, *end;

        /*
         * Hardcode cl-size to 64 - CPUID can't be used here because that might
         * cause another #VC exception and the GHCB is not ready to use yet.
         */
        flush_size = 64;
        start      = (char *)(address & PAGE_MASK);
        end        = start + PAGE_SIZE;

        /*
         * First make sure there are no pending writes on the cache-lines to
         * flush.
         */
        asm volatile("mfence" : : : "memory");

        for (cl = start; cl != end; cl += flush_size)
                clflush(cl);
}

static int set_clr_page_flags(struct x86_mapping_info *info,
                              unsigned long address,
                              pteval_t set, pteval_t clr)
{
        pgd_t *pgdp = (pgd_t *)top_level_pgt;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep, pte;

        /*
         * First make sure there is a PMD mapping for 'address'.
         * It should already exist, but keep things generic.
         *
         * To map the page, just read from it and fault it in if there is no
         * mapping yet. add_identity_map() can't be called here because that
         * would unconditionally map the address on PMD level, destroying any
         * PTE-level mappings that might already exist. Use assembly here so
         * the access won't be optimized away.
         */
        asm volatile("mov %[address], %%r9"
                     :: [address] "g" (*(unsigned long *)address)
                     : "r9", "memory");

        /*
         * The page is mapped at least with PMD size - so skip checks and walk
         * directly to the PMD.
         */
        p4dp = p4d_offset(pgdp, address);
        pudp = pud_offset(p4dp, address);
        pmdp = pmd_offset(pudp, address);

        if (pmd_large(*pmdp))
                ptep = split_large_pmd(info, pmdp, address);
        else
                ptep = pte_offset_kernel(pmdp, address);

        if (!ptep)
                return -ENOMEM;

        /*
         * Changing encryption attributes of a page requires flushing it from
         * the caches.
         */
        if ((set | clr) & _PAGE_ENC)
                clflush_page(address);

        /* Update PTE */
        pte = *ptep;
        pte = pte_set_flags(pte, set);
        pte = pte_clear_flags(pte, clr);
        set_pte(ptep, pte);

        /* Flush TLB after changing encryption attribute */
        write_cr3(top_level_pgt);

        return 0;
}

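/*
 * Convenience wrappers around set_clr_page_flags(), used by the SEV-ES
 * support code to change the encryption or presence attributes of a
 * single page.
 */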
int set_page_decrypted(unsigned long address)
{
        return set_clr_page_flags(&mapping_info, address, 0, _PAGE_ENC);
}

int set_page_encrypted(unsigned long address)
{
        return set_clr_page_flags(&mapping_info, address, _PAGE_ENC, 0);
}

int set_page_non_present(unsigned long address)
{
        return set_clr_page_flags(&mapping_info, address, 0, _PAGE_PRESENT);
}

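/* Print diagnostics for an unrecoverable page-fault and halt the boot. */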
static void do_pf_error(const char *msg, unsigned long error_code,
                        unsigned long address, unsigned long ip)
{
        error_putstr(msg);

        error_putstr("\nError Code: ");
        error_puthex(error_code);
        error_putstr("\nCR2: 0x");
        error_puthex(address);
        error_putstr("\nRIP relative to _head: 0x");
        error_puthex(ip - (unsigned long)_head);
        error_putstr("\n");

        error("Stopping.\n");
}

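/*
 * Page-fault handler for the decompression stage: sane faults are resolved
 * by identity-mapping the 2M region around the faulting address on demand;
 * anything else is treated as fatal.
 */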
void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        unsigned long address = native_read_cr2();
        unsigned long end;
        bool ghcb_fault;

        ghcb_fault = sev_es_check_ghcb_fault(address);

        address   &= PMD_MASK;
        end        = address + PMD_SIZE;

        /*
         * Check for unexpected error codes. Unexpected are:
         *      - Faults on present pages
         *      - User faults
         *      - Reserved bits set
         */
        if (error_code & (X86_PF_PROT | X86_PF_USER | X86_PF_RSVD))
                do_pf_error("Unexpected page-fault:", error_code, address, regs->ip);
        else if (ghcb_fault)
                do_pf_error("Page-fault on GHCB page:", error_code, address, regs->ip);

        /*
         * Error code is sane - now identity map the 2M region around
         * the faulting address.
         */
        add_identity_map(address, end);
}