linux/arch/x86/platform/efi/efi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Common EFI (Extensible Firmware Interface) support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2002 Hewlett-Packard Co.
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *      Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2005-2008 Intel Co.
 *      Fenghua Yu <fenghua.yu@intel.com>
 *      Bibo Mao <bibo.mao@intel.com>
 *      Chandramouli Narayanan <mouli@linux.intel.com>
 *      Huang Ying <ying.huang@intel.com>
 * Copyright (C) 2013 SuSE Labs
 *      Borislav Petkov <bp@suse.de> - runtime services VA mapping
 *
 * Copied from efi_32.c to eliminate the duplicated code between EFI
 * 32/64 support code. --ying 2007-10-26
 *
 * All EFI Runtime Services are not implemented yet as EFI only
 * supports physical mode addressing on SoftSDV. This is to be fixed
 * in a future version.  --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls.  --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *      Skip non-WB memory and ignore empty memory ranges.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/bcd.h>

#include <asm/setup.h>
#include <asm/efi.h>
#include <asm/e820/api.h>
#include <asm/time.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/uv/uv.h>

static unsigned long efi_systab_phys __initdata;
static unsigned long prop_phys = EFI_INVALID_TABLE_ADDR;
static unsigned long uga_phys = EFI_INVALID_TABLE_ADDR;
static unsigned long efi_runtime, efi_nr_tables;

unsigned long efi_fw_vendor, efi_config_table;

static const efi_config_table_type_t arch_tables[] __initconst = {
        {EFI_PROPERTIES_TABLE_GUID,     &prop_phys,             "PROP"          },
        {UGA_IO_PROTOCOL_GUID,          &uga_phys,              "UGA"           },
#ifdef CONFIG_X86_UV
        {UV_SYSTEM_TABLE_GUID,          &uv_systab_phys,        "UVsystab"      },
#endif
        {},
};

static const unsigned long * const efi_tables[] = {
        &efi.acpi,
        &efi.acpi20,
        &efi.smbios,
        &efi.smbios3,
        &uga_phys,
#ifdef CONFIG_X86_UV
        &uv_systab_phys,
#endif
        &efi_fw_vendor,
        &efi_runtime,
        &efi_config_table,
        &efi.esrt,
        &prop_phys,
        &efi_mem_attr_table,
#ifdef CONFIG_EFI_RCI2_TABLE
        &rci2_table_phys,
#endif
        &efi.tpm_log,
        &efi.tpm_final_log,
        &efi_rng_seed,
#ifdef CONFIG_LOAD_UEFI_KEYS
        &efi.mokvar_table,
#endif
};

u64 efi_setup;          /* efi setup_data physical address */

static int add_efi_memmap __initdata;
static int __init setup_add_efi_memmap(char *arg)
{
        add_efi_memmap = 1;
        return 0;
}
early_param("add_efi_memmap", setup_add_efi_memmap);

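/*
 * Mark all EFI_MEMORY_MORE_RELIABLE ranges as mirrored in memblock and
 * report how much of the total memory is mirrored.
 */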
void __init efi_find_mirror(void)
{
        efi_memory_desc_t *md;
        u64 mirror_size = 0, total_size = 0;

        if (!efi_enabled(EFI_MEMMAP))
                return;

        for_each_efi_memory_desc(md) {
                unsigned long long start = md->phys_addr;
                unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;

                total_size += size;
                if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
                        memblock_mark_mirror(start, size);
                        mirror_size += size;
                }
        }
        if (mirror_size)
                pr_info("Memory: %lldM/%lldM mirrored memory\n",
                        mirror_size>>20, total_size>>20);
}

/*
 * Tell the kernel about the EFI memory map.  This might include
 * more than the max 128 entries that can fit in the passed in e820
 * legacy (zeropage) memory map, but the kernel's e820 table can hold
 * E820_MAX_ENTRIES.
 */

static void __init do_add_efi_memmap(void)
{
        efi_memory_desc_t *md;

        if (!efi_enabled(EFI_MEMMAP))
                return;

        for_each_efi_memory_desc(md) {
                unsigned long long start = md->phys_addr;
                unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
                int e820_type;

                switch (md->type) {
                case EFI_LOADER_CODE:
                case EFI_LOADER_DATA:
                case EFI_BOOT_SERVICES_CODE:
                case EFI_BOOT_SERVICES_DATA:
                case EFI_CONVENTIONAL_MEMORY:
                        if (efi_soft_reserve_enabled()
                            && (md->attribute & EFI_MEMORY_SP))
                                e820_type = E820_TYPE_SOFT_RESERVED;
                        else if (md->attribute & EFI_MEMORY_WB)
                                e820_type = E820_TYPE_RAM;
                        else
                                e820_type = E820_TYPE_RESERVED;
                        break;
                case EFI_ACPI_RECLAIM_MEMORY:
                        e820_type = E820_TYPE_ACPI;
                        break;
                case EFI_ACPI_MEMORY_NVS:
                        e820_type = E820_TYPE_NVS;
                        break;
                case EFI_UNUSABLE_MEMORY:
                        e820_type = E820_TYPE_UNUSABLE;
                        break;
                case EFI_PERSISTENT_MEMORY:
                        e820_type = E820_TYPE_PMEM;
                        break;
                default:
                        /*
                         * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
                         * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
                         * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
                         */
                        e820_type = E820_TYPE_RESERVED;
                        break;
                }

                e820__range_add(start, size, e820_type);
        }
        e820__update_table(e820_table);
}

/*
 * Given that add_efi_memmap defaults to 0 and there is no alternative
 * e820 mechanism for soft-reserved memory, import the full EFI memory
 * map if soft reservations are present and enabled. Otherwise, the
 * mechanism to disable the kernel's consideration of EFI_MEMORY_SP is
 * the efi=nosoftreserve option.
 */
static bool do_efi_soft_reserve(void)
{
        efi_memory_desc_t *md;

        if (!efi_enabled(EFI_MEMMAP))
                return false;

        if (!efi_soft_reserve_enabled())
                return false;

        for_each_efi_memory_desc(md)
                if (md->type == EFI_CONVENTIONAL_MEMORY &&
                    (md->attribute & EFI_MEMORY_SP))
                        return true;
        return false;
}

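/*
 * Reserve the boot-provided EFI memory map with memblock so it survives
 * early allocations, and (optionally) feed its entries into the e820
 * table.
 */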
int __init efi_memblock_x86_reserve_range(void)
{
        struct efi_info *e = &boot_params.efi_info;
        struct efi_memory_map_data data;
        phys_addr_t pmap;
        int rv;

        if (efi_enabled(EFI_PARAVIRT))
                return 0;

        /* Can't handle firmware tables above 4GB on i386 */
        if (IS_ENABLED(CONFIG_X86_32) && e->efi_memmap_hi > 0) {
                pr_err("Memory map is above 4GB, disabling EFI.\n");
                return -EINVAL;
        }
        pmap = (phys_addr_t)(e->efi_memmap | ((u64)e->efi_memmap_hi << 32));

        data.phys_map           = pmap;
        data.size               = e->efi_memmap_size;
        data.desc_size          = e->efi_memdesc_size;
        data.desc_version       = e->efi_memdesc_version;

        rv = efi_memmap_init_early(&data);
        if (rv)
                return rv;

        if (add_efi_memmap || do_efi_soft_reserve())
                do_add_efi_memmap();

        efi_fake_memmap_early();

        WARN(efi.memmap.desc_version != 1,
             "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
             efi.memmap.desc_version);

        memblock_reserve(pmap, efi.memmap.nr_map * efi.memmap.desc_size);
        set_bit(EFI_PRESERVE_BS_REGIONS, &efi.flags);

        return 0;
}

#define OVERFLOW_ADDR_SHIFT     (64 - EFI_PAGE_SHIFT)
#define OVERFLOW_ADDR_MASK      (U64_MAX << OVERFLOW_ADDR_SHIFT)
#define U64_HIGH_BIT            (~(U64_MAX >> 1))

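/*
 * Sanity-check a single memory map descriptor: reject zero-sized
 * entries and entries whose page count would make the end address wrap
 * past the top of the 64-bit physical address space.
 */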
static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
{
        u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
        u64 end_hi = 0;
        char buf[64];

        if (md->num_pages == 0) {
                end = 0;
        } else if (md->num_pages > EFI_PAGES_MAX ||
                   EFI_PAGES_MAX - md->num_pages <
                   (md->phys_addr >> EFI_PAGE_SHIFT)) {
                end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
                        >> OVERFLOW_ADDR_SHIFT;

                if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
                        end_hi += 1;
        } else {
                return true;
        }

        pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");

        if (end_hi) {
                pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
                        i, efi_md_typeattr_format(buf, sizeof(buf), md),
                        md->phys_addr, end_hi, end);
        } else {
                pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
                        i, efi_md_typeattr_format(buf, sizeof(buf), md),
                        md->phys_addr, end);
        }
        return false;
}

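/*
 * Compact the memory map in place, dropping descriptors that fail the
 * validity check above, and install the trimmed map if anything was
 * removed.
 */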
static void __init efi_clean_memmap(void)
{
        efi_memory_desc_t *out = efi.memmap.map;
        const efi_memory_desc_t *in = out;
        const efi_memory_desc_t *end = efi.memmap.map_end;
        int i, n_removal;

        for (i = n_removal = 0; in < end; i++) {
                if (efi_memmap_entry_valid(in, i)) {
                        if (out != in)
                                memcpy(out, in, efi.memmap.desc_size);
                        out = (void *)out + efi.memmap.desc_size;
                } else {
                        n_removal++;
                }
                in = (void *)in + efi.memmap.desc_size;
        }

        if (n_removal > 0) {
                struct efi_memory_map_data data = {
                        .phys_map       = efi.memmap.phys_map,
                        .desc_version   = efi.memmap.desc_version,
                        .desc_size      = efi.memmap.desc_size,
                        .size           = efi.memmap.desc_size * (efi.memmap.nr_map - n_removal),
                        .flags          = 0,
                };

                pr_warn("Removing %d invalid memory map entries.\n", n_removal);
                efi_memmap_install(&data);
        }
}

void __init efi_print_memmap(void)
{
        efi_memory_desc_t *md;
        int i = 0;

        for_each_efi_memory_desc(md) {
                char buf[64];

                pr_info("mem%02u: %s range=[0x%016llx-0x%016llx] (%lluMB)\n",
                        i++, efi_md_typeattr_format(buf, sizeof(buf), md),
                        md->phys_addr,
                        md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
                        (md->num_pages >> (20 - EFI_PAGE_SHIFT)));
        }
}

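/*
 * Map the EFI system table passed by the boot loader, validate its
 * header, and record the physical addresses of the runtime services,
 * the firmware vendor string and the configuration table array.
 */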
static int __init efi_systab_init(unsigned long phys)
{
        int size = efi_enabled(EFI_64BIT) ? sizeof(efi_system_table_64_t)
                                          : sizeof(efi_system_table_32_t);
        const efi_table_hdr_t *hdr;
        bool over4g = false;
        void *p;
        int ret;

        hdr = p = early_memremap_ro(phys, size);
        if (p == NULL) {
                pr_err("Couldn't map the system table!\n");
                return -ENOMEM;
        }

        ret = efi_systab_check_header(hdr, 1);
        if (ret) {
                early_memunmap(p, size);
                return ret;
        }

        if (efi_enabled(EFI_64BIT)) {
                const efi_system_table_64_t *systab64 = p;

                efi_runtime     = systab64->runtime;
                over4g          = systab64->runtime > U32_MAX;

                if (efi_setup) {
                        struct efi_setup_data *data;

                        data = early_memremap_ro(efi_setup, sizeof(*data));
                        if (!data) {
                                early_memunmap(p, size);
                                return -ENOMEM;
                        }

                        efi_fw_vendor           = (unsigned long)data->fw_vendor;
                        efi_config_table        = (unsigned long)data->tables;

                        over4g |= data->fw_vendor       > U32_MAX ||
                                  data->tables          > U32_MAX;

                        early_memunmap(data, sizeof(*data));
                } else {
                        efi_fw_vendor           = systab64->fw_vendor;
                        efi_config_table        = systab64->tables;

                        over4g |= systab64->fw_vendor   > U32_MAX ||
                                  systab64->tables      > U32_MAX;
                }
                efi_nr_tables = systab64->nr_tables;
        } else {
                const efi_system_table_32_t *systab32 = p;

                efi_fw_vendor           = systab32->fw_vendor;
                efi_runtime             = systab32->runtime;
                efi_config_table        = systab32->tables;
                efi_nr_tables           = systab32->nr_tables;
        }

        efi.runtime_version = hdr->revision;

        efi_systab_report_header(hdr, efi_fw_vendor);
        early_memunmap(p, size);

        if (IS_ENABLED(CONFIG_X86_32) && over4g) {
                pr_err("EFI data located above 4GB, disabling EFI.\n");
                return -EINVAL;
        }

        return 0;
}

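/*
 * Map the firmware's configuration table array and hand it to the
 * generic parser, together with the x86-specific tables declared in
 * arch_tables[].
 */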
static int __init efi_config_init(const efi_config_table_type_t *arch_tables)
{
        void *config_tables;
        int sz, ret;

        if (efi_nr_tables == 0)
                return 0;

        if (efi_enabled(EFI_64BIT))
                sz = sizeof(efi_config_table_64_t);
        else
                sz = sizeof(efi_config_table_32_t);

        /*
         * Let's see what config tables the firmware passed to us.
         */
        config_tables = early_memremap(efi_config_table, efi_nr_tables * sz);
        if (config_tables == NULL) {
                pr_err("Could not map Configuration table!\n");
                return -ENOMEM;
        }

        ret = efi_config_parse_tables(config_tables, efi_nr_tables,
                                      arch_tables);

        early_memunmap(config_tables, efi_nr_tables * sz);
        return ret;
}

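/*
 * Early EFI initialization: locate and parse the system and
 * configuration tables, decide whether runtime services are usable,
 * and clean up the memory map.
 */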
void __init efi_init(void)
{
        if (IS_ENABLED(CONFIG_X86_32) &&
            (boot_params.efi_info.efi_systab_hi ||
             boot_params.efi_info.efi_memmap_hi)) {
                pr_info("Table located above 4GB, disabling EFI.\n");
                return;
        }

        efi_systab_phys = boot_params.efi_info.efi_systab |
                          ((__u64)boot_params.efi_info.efi_systab_hi << 32);

        if (efi_systab_init(efi_systab_phys))
                return;

        if (efi_reuse_config(efi_config_table, efi_nr_tables))
                return;

        if (efi_config_init(arch_tables))
                return;

        /*
         * Note: We currently don't support runtime services on an EFI
         * that doesn't match the kernel 32/64-bit mode.
         */

        if (!efi_runtime_supported())
                pr_err("No EFI runtime due to 32/64-bit mismatch with kernel\n");

        if (!efi_runtime_supported() || efi_runtime_disabled()) {
                efi_memmap_unmap();
                return;
        }

        /* Parse the EFI Properties table if it exists */
        if (prop_phys != EFI_INVALID_TABLE_ADDR) {
                efi_properties_table_t *tbl;

                tbl = early_memremap_ro(prop_phys, sizeof(*tbl));
                if (tbl == NULL) {
                        pr_err("Could not map Properties table!\n");
                } else {
                        if (tbl->memory_protection_attribute &
                            EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA)
                                set_bit(EFI_NX_PE_DATA, &efi.flags);

                        early_memunmap(tbl, sizeof(*tbl));
                }
        }

        set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
        efi_clean_memmap();

        if (efi_enabled(EFI_DBG))
                efi_print_memmap();
}

/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{
        efi_memory_desc_t *md, *prev_md = NULL;

        for_each_efi_memory_desc(md) {
                u64 prev_size;

                if (!prev_md) {
                        prev_md = md;
                        continue;
                }

                if (prev_md->type != md->type ||
                    prev_md->attribute != md->attribute) {
                        prev_md = md;
                        continue;
                }

                prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;

                if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
                        prev_md->num_pages += md->num_pages;
                        md->type = EFI_RESERVED_TYPE;
                        md->attribute = 0;
                        continue;
                }
                prev_md = md;
        }
}

static void *realloc_pages(void *old_memmap, int old_shift)
{
        void *ret;

        ret = (void *)__get_free_pages(GFP_KERNEL, old_shift + 1);
        if (!ret)
                goto out;

        /*
         * A first-time allocation doesn't have anything to copy.
         */
        if (!old_memmap)
                return ret;

        memcpy(ret, old_memmap, PAGE_SIZE << old_shift);

out:
        free_pages((unsigned long)old_memmap, old_shift);
        return ret;
}

/*
 * Iterate the EFI memory map in reverse order because the regions
 * will be mapped top-down. The end result is the same as if we had
 * mapped things forward, but doesn't require us to change the
 * existing implementation of efi_map_region().
 */
static inline void *efi_map_next_entry_reverse(void *entry)
{
        /* Initial call */
        if (!entry)
                return efi.memmap.map_end - efi.memmap.desc_size;

        entry -= efi.memmap.desc_size;
        if (entry < efi.memmap.map)
                return NULL;

        return entry;
}

/*
 * efi_map_next_entry - Return the next EFI memory map descriptor
 * @entry: Previous EFI memory map descriptor
 *
 * This is a helper function to iterate over the EFI memory map, which
 * we do in different orders depending on the current configuration.
 *
 * To begin traversing the memory map @entry must be %NULL.
 *
 * Returns %NULL when we reach the end of the memory map.
 */
static void *efi_map_next_entry(void *entry)
{
        if (efi_enabled(EFI_64BIT)) {
                /*
                 * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
                 * config table feature requires us to map all entries
                 * in the same order as they appear in the EFI memory
                 * map. That is to say, entry N must have a lower
                 * virtual address than entry N+1. This is because the
                 * firmware toolchain leaves relative references in
                 * the code/data sections, which are split and become
                 * separate EFI memory regions. Mapping things
                 * out-of-order leads to the firmware accessing
                 * unmapped addresses.
                 *
                 * Since we need to map things this way whether or not
                 * the kernel actually makes use of
                 * EFI_PROPERTIES_TABLE, let's just switch to this
                 * scheme by default for 64-bit.
                 */
                return efi_map_next_entry_reverse(entry);
        }

        /* Initial call */
        if (!entry)
                return efi.memmap.map;

        entry += efi.memmap.desc_size;
        if (entry >= efi.memmap.map_end)
                return NULL;

        return entry;
}

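/*
 * Decide whether a descriptor needs a virtual mapping in the EFI page
 * tables: runtime regions always do, and on 64-bit we also map boot
 * services regions and (in mixed mode) conventional/loader memory.
 */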
static bool should_map_region(efi_memory_desc_t *md)
{
        /*
         * Runtime regions always require runtime mappings (obviously).
         */
        if (md->attribute & EFI_MEMORY_RUNTIME)
                return true;

        /*
         * 32-bit EFI doesn't suffer from the bug that requires us to
         * reserve boot services regions, and mixed mode support
         * doesn't exist for 32-bit kernels.
         */
        if (IS_ENABLED(CONFIG_X86_32))
                return false;

        /*
         * EFI specific purpose memory may be reserved by default
         * depending on kernel config and boot options.
         */
        if (md->type == EFI_CONVENTIONAL_MEMORY &&
            efi_soft_reserve_enabled() &&
            (md->attribute & EFI_MEMORY_SP))
                return false;

        /*
         * Map all of RAM so that we can access arguments in the 1:1
         * mapping when making EFI runtime calls.
         */
        if (efi_is_mixed()) {
                if (md->type == EFI_CONVENTIONAL_MEMORY ||
                    md->type == EFI_LOADER_DATA ||
                    md->type == EFI_LOADER_CODE)
                        return true;
        }

        /*
         * Map boot services regions as a workaround for buggy
         * firmware that accesses them even when they shouldn't.
         *
         * See efi_{reserve,free}_boot_services().
         */
        if (md->type == EFI_BOOT_SERVICES_CODE ||
            md->type == EFI_BOOT_SERVICES_DATA)
                return true;

        return false;
}

/*
 * Map the efi memory ranges of the runtime services and update new_mmap with
 * virtual addresses.
 */
static void * __init efi_map_regions(int *count, int *pg_shift)
{
        void *p, *new_memmap = NULL;
        unsigned long left = 0;
        unsigned long desc_size;
        efi_memory_desc_t *md;

        desc_size = efi.memmap.desc_size;

        p = NULL;
        while ((p = efi_map_next_entry(p))) {
                md = p;

                if (!should_map_region(md))
                        continue;

                efi_map_region(md);

                if (left < desc_size) {
                        new_memmap = realloc_pages(new_memmap, *pg_shift);
                        if (!new_memmap)
                                return NULL;

                        left += PAGE_SIZE << *pg_shift;
                        (*pg_shift)++;
                }

                memcpy(new_memmap + (*count * desc_size), md, desc_size);

                left -= desc_size;
                (*count)++;
        }

        return new_memmap;
}

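/*
 * In a kexec'd kernel the runtime regions were already switched to
 * virtual mode by the first kernel, so re-map them at the fixed
 * virtual addresses handed over via setup_data instead of calling
 * SetVirtualAddressMap() again.
 */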
static void __init kexec_enter_virtual_mode(void)
{
#ifdef CONFIG_KEXEC_CORE
        efi_memory_desc_t *md;
        unsigned int num_pages;

        /*
         * We don't do virtual mode, since we don't do runtime services, on
         * non-native EFI.
         */
        if (efi_is_mixed()) {
                efi_memmap_unmap();
                clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                return;
        }

        if (efi_alloc_page_tables()) {
                pr_err("Failed to allocate EFI page tables\n");
                clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                return;
        }

        /*
         * Map EFI regions that were passed in via setup_data. The virt_addr
         * is a fixed address that was used in the first kernel of a kexec
         * boot.
         */
        for_each_efi_memory_desc(md)
                efi_map_region_fixed(md); /* FIXME: add error handling */

        /*
         * Unregister the early EFI memmap from efi_init() and install
         * the new EFI memory map.
         */
        efi_memmap_unmap();

        if (efi_memmap_init_late(efi.memmap.phys_map,
                                 efi.memmap.desc_size * efi.memmap.nr_map)) {
                pr_err("Failed to remap late EFI memory map\n");
                clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                return;
        }

        num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
        num_pages >>= PAGE_SHIFT;

        if (efi_setup_page_tables(efi.memmap.phys_map, num_pages)) {
                clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                return;
        }

        efi_sync_low_kernel_mappings();
        efi_native_runtime_setup();
#endif
}

/*
 * This function will switch the EFI runtime services to virtual mode.
 * Essentially, we look through the EFI memmap and map every region that
 * has the runtime attribute bit set in its memory descriptor into the
 * efi_pgd page table.
 *
 * The new method does a pagetable switch in a preemption-safe manner
 * so that we're in a different address space when calling a runtime
 * function. For function argument passing we copy the PUDs of the
 * kernel page table into efi_pgd prior to each call.
 *
 * For a kexec boot specifically, the EFI runtime maps of the previous
 * kernel should be passed in via setup_data. In that case the runtime
 * ranges will be mapped to the same virtual addresses as in the first
 * kernel, see kexec_enter_virtual_mode().
 */
static void __init __efi_enter_virtual_mode(void)
{
        int count = 0, pg_shift = 0;
        void *new_memmap = NULL;
        efi_status_t status;
        unsigned long pa;

        if (efi_alloc_page_tables()) {
                pr_err("Failed to allocate EFI page tables\n");
                goto err;
        }

        efi_merge_regions();
        new_memmap = efi_map_regions(&count, &pg_shift);
        if (!new_memmap) {
                pr_err("Error reallocating memory, EFI runtime non-functional!\n");
                goto err;
        }

        pa = __pa(new_memmap);

        /*
         * Unregister the early EFI memmap from efi_init() and install
         * the new EFI memory map that we are about to pass to the
         * firmware via SetVirtualAddressMap().
         */
        efi_memmap_unmap();

        if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) {
                pr_err("Failed to remap late EFI memory map\n");
                goto err;
        }

        if (efi_enabled(EFI_DBG)) {
                pr_info("EFI runtime memory map:\n");
                efi_print_memmap();
        }

        if (efi_setup_page_tables(pa, 1 << pg_shift))
                goto err;

        efi_sync_low_kernel_mappings();

        status = efi_set_virtual_address_map(efi.memmap.desc_size * count,
                                             efi.memmap.desc_size,
                                             efi.memmap.desc_version,
                                             (efi_memory_desc_t *)pa,
                                             efi_systab_phys);
        if (status != EFI_SUCCESS) {
                pr_err("Unable to switch EFI into virtual mode (status=%lx)!\n",
                       status);
                goto err;
        }

        efi_check_for_embedded_firmwares();
        efi_free_boot_services();

        if (!efi_is_mixed())
                efi_native_runtime_setup();
        else
                efi_thunk_runtime_setup();

        /*
         * Apply more restrictive page table mapping attributes now that
         * SVAM() has been called and the firmware has performed all
         * necessary relocation fixups for the new virtual addresses.
         */
        efi_runtime_update_mappings();

        /* clean DUMMY object */
        efi_delete_dummy_variable();
        return;

err:
        clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
}

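/*
 * Entry point used during setup: picks the kexec or the regular path
 * for switching the runtime services to virtual mode.
 */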
void __init efi_enter_virtual_mode(void)
{
        if (efi_enabled(EFI_PARAVIRT))
                return;

        efi.runtime = (efi_runtime_services_t *)efi_runtime;

        if (efi_setup)
                kexec_enter_virtual_mode();
        else
                __efi_enter_virtual_mode();

        efi_dump_pagetable();
}

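/*
 * Report whether a physical address belongs to one of the firmware
 * tables tracked in efi_tables[].
 */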
bool efi_is_table_address(unsigned long phys_addr)
{
        unsigned int i;

        if (phys_addr == EFI_INVALID_TABLE_ADDR)
                return false;

        for (i = 0; i < ARRAY_SIZE(efi_tables); i++)
                if (*(efi_tables[i]) == phys_addr)
                        return true;

        return false;
}

char *efi_systab_show_arch(char *str)
{
        if (uga_phys != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "UGA=0x%lx\n", uga_phys);
        return str;
}

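/*
 * sysfs attributes exposing the physical addresses of the firmware
 * vendor string, the runtime services table and the configuration
 * table under /sys/firmware/efi/.
 */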
#define EFI_FIELD(var) efi_ ## var

#define EFI_ATTR_SHOW(name) \
static ssize_t name##_show(struct kobject *kobj, \
                                struct kobj_attribute *attr, char *buf) \
{ \
        return sprintf(buf, "0x%lx\n", EFI_FIELD(name)); \
}

EFI_ATTR_SHOW(fw_vendor);
EFI_ATTR_SHOW(runtime);
EFI_ATTR_SHOW(config_table);

struct kobj_attribute efi_attr_fw_vendor = __ATTR_RO(fw_vendor);
struct kobj_attribute efi_attr_runtime = __ATTR_RO(runtime);
struct kobj_attribute efi_attr_config_table = __ATTR_RO(config_table);

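/*
 * Hide each of the attributes above when the corresponding address was
 * never provided by the firmware; fw_vendor is also hidden on paravirt
 * EFI.
 */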
umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
        if (attr == &efi_attr_fw_vendor.attr) {
                if (efi_enabled(EFI_PARAVIRT) ||
                                efi_fw_vendor == EFI_INVALID_TABLE_ADDR)
                        return 0;
        } else if (attr == &efi_attr_runtime.attr) {
                if (efi_runtime == EFI_INVALID_TABLE_ADDR)
                        return 0;
        } else if (attr == &efi_attr_config_table.attr) {
                if (efi_config_table == EFI_INVALID_TABLE_ADDR)
                        return 0;
        }
        return attr->mode;
}