linux/arch/x86/platform/efi/efi_64.c
// SPDX-License-Identifier: GPL-2.0
/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *      Fenghua Yu <fenghua.yu@intel.com>
 *      Bibo Mao <bibo.mao@intel.com>
 *      Chandramouli Narayanan <mouli@linux.intel.com>
 *      Huang Ying <ying.huang@intel.com>
 *
 * Code to convert EFI to E820 map has been implemented in the elilo bootloader
 * based on an EFI patch by Edgar Hucek. Based on the E820 map, the page table
 * is set up appropriately for EFI runtime code.
 * - mouli 06/14/2007.
 *
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>
#include <linux/mem_encrypt.h>
#include <linux/sched/task.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820/api.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/sev.h>

/*
 * We allocate runtime services regions top-down, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000, and limit EFI VA mapping space to 64G.
 */
static u64 efi_va = EFI_VA_START;

struct efi_scratch efi_scratch;

EXPORT_SYMBOL_GPL(efi_mm);

/*
 * We need our own copy of the higher levels of the page tables
 * because we want to avoid inserting EFI region mappings (EFI_VA_END
 * to EFI_VA_START) into the standard kernel page tables. Everything
 * else can be shared, see efi_sync_low_kernel_mappings().
 *
 * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the
 * allocation.
 */
int __init efi_alloc_page_tables(void)
{
        pgd_t *pgd, *efi_pgd;
        p4d_t *p4d;
        pud_t *pud;
        gfp_t gfp_mask;

        gfp_mask = GFP_KERNEL | __GFP_ZERO;
        efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
        if (!efi_pgd)
                return -ENOMEM;

        pgd = efi_pgd + pgd_index(EFI_VA_END);
        p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
        if (!p4d) {
                free_page((unsigned long)efi_pgd);
                return -ENOMEM;
        }

        pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
        if (!pud) {
                if (pgtable_l5_enabled())
                        free_page((unsigned long) pgd_page_vaddr(*pgd));
                free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
                return -ENOMEM;
        }

        efi_mm.pgd = efi_pgd;
        mm_init_cpumask(&efi_mm);
        init_new_context(NULL, &efi_mm);

        return 0;
}

/*
 * Add low kernel mappings for passing arguments to EFI functions.
 */
void efi_sync_low_kernel_mappings(void)
{
        unsigned num_entries;
        pgd_t *pgd_k, *pgd_efi;
        p4d_t *p4d_k, *p4d_efi;
        pud_t *pud_k, *pud_efi;
        pgd_t *efi_pgd = efi_mm.pgd;

        pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
        pgd_k = pgd_offset_k(PAGE_OFFSET);

        num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
        memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);

        pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
        pgd_k = pgd_offset_k(EFI_VA_END);
        p4d_efi = p4d_offset(pgd_efi, 0);
        p4d_k = p4d_offset(pgd_k, 0);

        num_entries = p4d_index(EFI_VA_END);
        memcpy(p4d_efi, p4d_k, sizeof(p4d_t) * num_entries);

        /*
         * We share all the PUD entries apart from those that map the
         * EFI regions. Copy around them.
         */
        BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
        BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);

        p4d_efi = p4d_offset(pgd_efi, EFI_VA_END);
        p4d_k = p4d_offset(pgd_k, EFI_VA_END);
        pud_efi = pud_offset(p4d_efi, 0);
        pud_k = pud_offset(p4d_k, 0);

        num_entries = pud_index(EFI_VA_END);
        memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);

        pud_efi = pud_offset(p4d_efi, EFI_VA_START);
        pud_k = pud_offset(p4d_k, EFI_VA_START);

        num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
        memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}

/*
 * Wrapper for slow_virt_to_phys() that handles NULL addresses.
 */
static inline phys_addr_t
virt_to_phys_or_null_size(void *va, unsigned long size)
{
        bool bad_size;

        if (!va)
                return 0;

        if (virt_addr_valid(va))
                return virt_to_phys(va);

        /*
         * A fully aligned variable on the stack is guaranteed not to
         * cross a page boundary. Try to catch strings on the stack by
         * checking that 'size' is a power of two.
         */
        bad_size = size > PAGE_SIZE || !is_power_of_2(size);

        WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);

        return slow_virt_to_phys(va);
}

#define virt_to_phys_or_null(addr)                              \
        virt_to_phys_or_null_size((addr), sizeof(*(addr)))

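/*
 * Populate the EFI page tables for the SetVirtualAddressMap() call:
 * ident-map the new memory map and the first physical page, map the GHCBs
 * when SEV-ES is active and, for mixed mode, also map the kernel text 1:1
 * and allocate a firmware call stack below 4GB.
 */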
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
        unsigned long pfn, text, pf;
        struct page *page;
        unsigned npages;
        pgd_t *pgd = efi_mm.pgd;

        /*
         * It can happen that the physical address of new_memmap lands in memory
         * which is not mapped in the EFI page table. Therefore we need to go
         * and ident-map those pages containing the map before calling
         * phys_efi_set_virtual_address_map().
         */
        pfn = pa_memmap >> PAGE_SHIFT;
        pf = _PAGE_NX | _PAGE_RW | _PAGE_ENC;
        if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
                pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
                return 1;
        }

        /*
         * Certain firmware versions are way too sentimental and still believe
         * they are exclusive and unquestionable owners of the first physical page,
         * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
         * (but then write-access it later during SetVirtualAddressMap()).
         *
         * Create a 1:1 mapping for this page, to avoid triple faults during early
         * boot with such firmware. We are free to hand this page to the BIOS,
         * as trim_bios_range() will reserve the first page and isolate it away
         * from memory allocators anyway.
         */
        pf = _PAGE_RW;
        if (sev_active())
                pf |= _PAGE_ENC;

        if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
                pr_err("Failed to create 1:1 mapping for the first page!\n");
                return 1;
        }

        /*
         * When SEV-ES is active, the GHCB as set by the kernel will be used
         * by firmware. Create a 1:1 unencrypted mapping for each GHCB.
         */
        if (sev_es_efi_map_ghcbs(pgd)) {
                pr_err("Failed to create 1:1 mapping for the GHCBs!\n");
                return 1;
        }

        /*
         * When making calls to the firmware everything needs to be 1:1
         * mapped and addressable with 32-bit pointers. Map the kernel
         * text and allocate a new stack because we can't rely on the
         * stack pointer being < 4GB.
         */
        if (!efi_is_mixed())
                return 0;

        page = alloc_page(GFP_KERNEL|__GFP_DMA32);
        if (!page)
                panic("Unable to allocate EFI runtime stack < 4GB\n");

        efi_scratch.phys_stack = virt_to_phys(page_address(page));
        efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */

        npages = (_etext - _text) >> PAGE_SHIFT;
        text = __pa(_text);
        pfn = text >> PAGE_SHIFT;

        pf = _PAGE_RW | _PAGE_ENC;
        if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, pf)) {
                pr_err("Failed to map kernel text 1:1\n");
                return 1;
        }

        return 0;
}

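/*
 * Map a single memory descriptor at @va in the EFI page tables, deriving
 * the caching and encryption page flags from the descriptor attributes.
 */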
static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
        unsigned long flags = _PAGE_RW;
        unsigned long pfn;
        pgd_t *pgd = efi_mm.pgd;

        if (!(md->attribute & EFI_MEMORY_WB))
                flags |= _PAGE_PCD;

        if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO)
                flags |= _PAGE_ENC;

        pfn = md->phys_addr >> PAGE_SHIFT;
        if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
                pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
                           md->phys_addr, va);
}

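/*
 * Map a runtime region both 1:1 and at a virtual address allocated top-down
 * from EFI_VA_START. In mixed mode only the 1:1 mapping is used, since the
 * 32-bit firmware cannot address the high mapping.
 */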
void __init efi_map_region(efi_memory_desc_t *md)
{
        unsigned long size = md->num_pages << PAGE_SHIFT;
        u64 pa = md->phys_addr;

        /*
         * Make sure the 1:1 mappings are present as a catch-all for b0rked
         * firmware which doesn't update all internal pointers after switching
         * to virtual mode and would otherwise crap on us.
         */
        __map_region(md, md->phys_addr);

        /*
         * Enforce the 1:1 mapping as the default virtual address when
         * booting in EFI mixed mode, because even though we may be
         * running a 64-bit kernel, the firmware may only be 32-bit.
         */
        if (efi_is_mixed()) {
                md->virt_addr = md->phys_addr;
                return;
        }

        efi_va -= size;

        /* Is PA 2M-aligned? */
        if (!(pa & (PMD_SIZE - 1))) {
                efi_va &= PMD_MASK;
        } else {
                u64 pa_offset = pa & (PMD_SIZE - 1);
                u64 prev_va = efi_va;

                /* get us the same offset within this 2M page */
                efi_va = (efi_va & PMD_MASK) + pa_offset;

                if (efi_va > prev_va)
                        efi_va -= PMD_SIZE;
        }

        if (efi_va < EFI_VA_END) {
                pr_warn(FW_WARN "VA address range overflow!\n");
                return;
        }

        /* Do the VA map */
        __map_region(md, efi_va);
        md->virt_addr = efi_va;
}

/*
 * The kexec kernel uses efi_map_region_fixed() to map EFI runtime memory
 * ranges. md->virt_addr is the original virtual address that was already
 * mapped by the first kernel, i.e. before kexec.
 */
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
        __map_region(md, md->phys_addr);
        __map_region(md, md->virt_addr);
}

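/*
 * Record where the EFI setup data payload lives: it immediately follows the
 * struct setup_data header at @phys_addr.
 */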
void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
        efi_setup = phys_addr + sizeof(struct setup_data);
}

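/*
 * Re-map a runtime region with the new protection flags in @pf, updating
 * both the 1:1 mapping and the high virtual mapping.
 */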
static int __init efi_update_mappings(efi_memory_desc_t *md, unsigned long pf)
{
        unsigned long pfn;
        pgd_t *pgd = efi_mm.pgd;
        int err1, err2;

        /* Update the 1:1 mapping */
        pfn = md->phys_addr >> PAGE_SHIFT;
        err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
        if (err1) {
                pr_err("Error while updating 1:1 mapping PA 0x%llx -> VA 0x%llx!\n",
                           md->phys_addr, md->virt_addr);
        }

        err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
        if (err2) {
                pr_err("Error while updating VA mapping PA 0x%llx -> VA 0x%llx!\n",
                           md->phys_addr, md->virt_addr);
        }

        return err1 || err2;
}

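/*
 * Callback for efi_memattr_apply_permissions(): translate the descriptor's
 * EFI_MEMORY_XP/RO attributes into page protection flags and apply them.
 */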
static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md)
{
        unsigned long pf = 0;

        if (md->attribute & EFI_MEMORY_XP)
                pf |= _PAGE_NX;

        if (!(md->attribute & EFI_MEMORY_RO))
                pf |= _PAGE_RW;

        if (sev_active())
                pf |= _PAGE_ENC;

        return efi_update_mappings(md, pf);
}

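/*
 * Tighten the protections on the EFI runtime regions, preferring the EFI
 * Memory Attribute Table and falling back to the EFI_PROPERTIES_TABLE-derived
 * EFI_NX_PE_DATA flag.
 */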
void __init efi_runtime_update_mappings(void)
{
        efi_memory_desc_t *md;

        /*
         * Use the EFI Memory Attribute Table for mapping permissions if it
         * exists, since it is intended to supersede EFI_PROPERTIES_TABLE.
         */
        if (efi_enabled(EFI_MEM_ATTR)) {
                efi_memattr_apply_permissions(NULL, efi_update_mem_attr);
                return;
        }

        /*
         * EFI_MEMORY_ATTRIBUTES_TABLE is intended to replace
         * EFI_PROPERTIES_TABLE. So, use EFI_PROPERTIES_TABLE to update
         * permissions only if EFI_MEMORY_ATTRIBUTES_TABLE is not
         * published by the firmware. Even if we find a buggy implementation of
         * EFI_MEMORY_ATTRIBUTES_TABLE, don't fall back to
         * EFI_PROPERTIES_TABLE, for the same reason.
         */

        if (!efi_enabled(EFI_NX_PE_DATA))
                return;

        for_each_efi_memory_desc(md) {
                unsigned long pf = 0;

                if (!(md->attribute & EFI_MEMORY_RUNTIME))
                        continue;

                if (!(md->attribute & EFI_MEMORY_WB))
                        pf |= _PAGE_PCD;

                if ((md->attribute & EFI_MEMORY_XP) ||
                        (md->type == EFI_RUNTIME_SERVICES_DATA))
                        pf |= _PAGE_NX;

                if (!(md->attribute & EFI_MEMORY_RO) &&
                        (md->type != EFI_RUNTIME_SERVICES_CODE))
                        pf |= _PAGE_RW;

                if (sev_active())
                        pf |= _PAGE_ENC;

                efi_update_mappings(md, pf);
        }
}

void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
        ptdump_walk_pgd_level(NULL, efi_mm.pgd);
#endif
}

/*
 * Makes the calling thread switch to/from efi_mm context. Can be used
 * in a kernel thread and user context. Preemption needs to remain disabled
 * while the efi_mm is borrowed. mmgrab()/mmdrop() is not used because the mm
 * cannot change under us.
 * It should be ensured that there are no concurrent calls to this function.
 */
void efi_switch_mm(struct mm_struct *mm)
{
        efi_scratch.prev_mm = current->active_mm;
        current->active_mm = mm;
        switch_mm(efi_scratch.prev_mm, mm, NULL);
}

extern efi_status_t efi64_thunk(u32, ...);

static DEFINE_SPINLOCK(efi_runtime_lock);

/*
 * DS and ES contain user values.  We need to save them.
 * The 32-bit EFI code needs a valid DS, ES, and SS.  There's no
 * need to save the old SS: __KERNEL_DS is always acceptable.
 */
#define __efi_thunk(func, ...)                                          \
({                                                                      \
        efi_runtime_services_32_t *__rt;                                \
        unsigned short __ds, __es;                                      \
        efi_status_t ____s;                                             \
                                                                        \
        __rt = (void *)(unsigned long)efi.systab->mixed_mode.runtime;   \
                                                                        \
        savesegment(ds, __ds);                                          \
        savesegment(es, __es);                                          \
                                                                        \
        loadsegment(ss, __KERNEL_DS);                                   \
        loadsegment(ds, __KERNEL_DS);                                   \
        loadsegment(es, __KERNEL_DS);                                   \
                                                                        \
        ____s = efi64_thunk(__rt->func, __VA_ARGS__);                   \
                                                                        \
        loadsegment(ds, __ds);                                          \
        loadsegment(es, __es);                                          \
                                                                        \
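        /* Move the 32-bit error bit (bit 31) up to the 64-bit error bit (bit 63). */ \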
        ____s ^= (____s & BIT(31)) | (____s & BIT_ULL(31)) << 32;       \
        ____s;                                                          \
})

/*
 * Switch to the EFI page tables early so that we can access the 1:1
 * runtime services mappings which are not mapped in any other page
 * tables.
 *
 * Also, disable interrupts because the IDT points to 64-bit handlers,
 * which aren't going to function correctly when we switch to 32-bit.
 */
#define efi_thunk(func...)                                              \
({                                                                      \
        efi_status_t __s;                                               \
                                                                        \
        arch_efi_call_virt_setup();                                     \
                                                                        \
        __s = __efi_thunk(func);                                        \
                                                                        \
        arch_efi_call_virt_teardown();                                  \
                                                                        \
        __s;                                                            \
})

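/*
 * Mixed-mode SetVirtualAddressMap(): sync the low kernel mappings into the
 * EFI page tables, switch to efi_mm and issue the call through the 32-bit
 * thunk with interrupts disabled.
 */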
static efi_status_t __init __no_sanitize_address
efi_thunk_set_virtual_address_map(unsigned long memory_map_size,
                                  unsigned long descriptor_size,
                                  u32 descriptor_version,
                                  efi_memory_desc_t *virtual_map)
{
        efi_status_t status;
        unsigned long flags;

        efi_sync_low_kernel_mappings();
        local_irq_save(flags);

        efi_switch_mm(&efi_mm);

        status = __efi_thunk(set_virtual_address_map, memory_map_size,
                             descriptor_size, descriptor_version, virtual_map);

        efi_switch_mm(efi_scratch.prev_mm);
        local_irq_restore(flags);

        return status;
}

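/*
 * The time services may drive the same CMOS RTC as the legacy RTC code, so
 * they are additionally serialized against it with rtc_lock.
 */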
static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
        efi_status_t status;
        u32 phys_tm, phys_tc;
        unsigned long flags;

        spin_lock(&rtc_lock);
        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_tm = virt_to_phys_or_null(tm);
        phys_tc = virt_to_phys_or_null(tc);

        status = efi_thunk(get_time, phys_tm, phys_tc);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);

        return status;
}

static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
        efi_status_t status;
        u32 phys_tm;
        unsigned long flags;

        spin_lock(&rtc_lock);
        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_tm = virt_to_phys_or_null(tm);

        status = efi_thunk(set_time, phys_tm);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);

        return status;
}

static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
                          efi_time_t *tm)
{
        efi_status_t status;
        u32 phys_enabled, phys_pending, phys_tm;
        unsigned long flags;

        spin_lock(&rtc_lock);
        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_enabled = virt_to_phys_or_null(enabled);
        phys_pending = virt_to_phys_or_null(pending);
        phys_tm = virt_to_phys_or_null(tm);

        status = efi_thunk(get_wakeup_time, phys_enabled,
                             phys_pending, phys_tm);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);

        return status;
}

static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
        efi_status_t status;
        u32 phys_tm;
        unsigned long flags;

        spin_lock(&rtc_lock);
        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_tm = virt_to_phys_or_null(tm);

        status = efi_thunk(set_wakeup_time, enabled, phys_tm);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);

        return status;
}

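/* Byte size of a UCS-2 variable name, used for the physical address lookup. */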
static unsigned long efi_name_size(efi_char16_t *name)
{
        return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
}

static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
                       u32 *attr, unsigned long *data_size, void *data)
{
        efi_status_t status;
        u32 phys_name, phys_vendor, phys_attr;
        u32 phys_data_size, phys_data;
        unsigned long flags;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_data_size = virt_to_phys_or_null(data_size);
        phys_vendor = virt_to_phys_or_null(vendor);
        phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
        phys_attr = virt_to_phys_or_null(attr);
        phys_data = virt_to_phys_or_null_size(data, *data_size);

        status = efi_thunk(get_variable, phys_name, phys_vendor,
                           phys_attr, phys_data_size, phys_data);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
                       u32 attr, unsigned long data_size, void *data)
{
        u32 phys_name, phys_vendor, phys_data;
        efi_status_t status;
        unsigned long flags;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
        phys_vendor = virt_to_phys_or_null(vendor);
        phys_data = virt_to_phys_or_null_size(data, data_size);

        /* If data_size is > sizeof(u32) we've got problems */
        status = efi_thunk(set_variable, phys_name, phys_vendor,
                           attr, data_size, phys_data);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

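/*
 * Non-blocking variant of efi_thunk_set_variable(): returns EFI_NOT_READY
 * instead of spinning when the runtime lock is contended.
 */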
static efi_status_t
efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
                                   u32 attr, unsigned long data_size,
                                   void *data)
{
        u32 phys_name, phys_vendor, phys_data;
        efi_status_t status;
        unsigned long flags;

        if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
                return EFI_NOT_READY;

        phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
        phys_vendor = virt_to_phys_or_null(vendor);
        phys_data = virt_to_phys_or_null_size(data, data_size);

        /* If data_size is > sizeof(u32) we've got problems */
        status = efi_thunk(set_variable, phys_name, phys_vendor,
                           attr, data_size, phys_data);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static efi_status_t
efi_thunk_get_next_variable(unsigned long *name_size,
                            efi_char16_t *name,
                            efi_guid_t *vendor)
{
        efi_status_t status;
        u32 phys_name_size, phys_name, phys_vendor;
        unsigned long flags;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_name_size = virt_to_phys_or_null(name_size);
        phys_vendor = virt_to_phys_or_null(vendor);
        phys_name = virt_to_phys_or_null_size(name, *name_size);

        status = efi_thunk(get_next_variable, phys_name_size,
                           phys_name, phys_vendor);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
        efi_status_t status;
        u32 phys_count;
        unsigned long flags;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_count = virt_to_phys_or_null(count);
        status = efi_thunk(get_next_high_mono_count, phys_count);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static void
efi_thunk_reset_system(int reset_type, efi_status_t status,
                       unsigned long data_size, efi_char16_t *data)
{
        u32 phys_data;
        unsigned long flags;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_data = virt_to_phys_or_null_size(data, data_size);

        efi_thunk(reset_system, reset_type, status, data_size, phys_data);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);
}

static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
                         unsigned long count, unsigned long sg_list)
{
        /*
         * To properly support this function we would need to repackage
         * 'capsules' because the firmware doesn't understand 64-bit
         * pointers.
         */
        return EFI_UNSUPPORTED;
}

static efi_status_t
efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
                              u64 *remaining_space,
                              u64 *max_variable_size)
{
        efi_status_t status;
        u32 phys_storage, phys_remaining, phys_max;
        unsigned long flags;

        if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
                return EFI_UNSUPPORTED;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_storage = virt_to_phys_or_null(storage_space);
        phys_remaining = virt_to_phys_or_null(remaining_space);
        phys_max = virt_to_phys_or_null(max_variable_size);

        status = efi_thunk(query_variable_info, attr, phys_storage,
                           phys_remaining, phys_max);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static efi_status_t
efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
                                          u64 *remaining_space,
                                          u64 *max_variable_size)
{
        efi_status_t status;
        u32 phys_storage, phys_remaining, phys_max;
        unsigned long flags;

        if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
                return EFI_UNSUPPORTED;

        if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
                return EFI_NOT_READY;

        phys_storage = virt_to_phys_or_null(storage_space);
        phys_remaining = virt_to_phys_or_null(remaining_space);
        phys_max = virt_to_phys_or_null(max_variable_size);

        status = efi_thunk(query_variable_info, attr, phys_storage,
                           phys_remaining, phys_max);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
                             unsigned long count, u64 *max_size,
                             int *reset_type)
{
        /*
         * To properly support this function we would need to repackage
         * 'capsules' because the firmware doesn't understand 64-bit
         * pointers.
         */
        return EFI_UNSUPPORTED;
}

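/*
 * Point the efi.* runtime service wrappers at the mixed-mode thunks. Only
 * does anything when the kernel is built with CONFIG_EFI_MIXED.
 */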
void __init efi_thunk_runtime_setup(void)
{
        if (!IS_ENABLED(CONFIG_EFI_MIXED))
                return;

        efi.get_time = efi_thunk_get_time;
        efi.set_time = efi_thunk_set_time;
        efi.get_wakeup_time = efi_thunk_get_wakeup_time;
        efi.set_wakeup_time = efi_thunk_set_wakeup_time;
        efi.get_variable = efi_thunk_get_variable;
        efi.get_next_variable = efi_thunk_get_next_variable;
        efi.set_variable = efi_thunk_set_variable;
        efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
        efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
        efi.reset_system = efi_thunk_reset_system;
        efi.query_variable_info = efi_thunk_query_variable_info;
        efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
        efi.update_capsule = efi_thunk_update_capsule;
        efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}

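/*
 * Hand the virtual memory map to the firmware via SetVirtualAddressMap(),
 * either through the mixed-mode thunk or as a native call issued from efi_mm
 * with interrupts disabled inside an efi_fpu_begin()/efi_fpu_end() section.
 */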
efi_status_t __init __no_sanitize_address
efi_set_virtual_address_map(unsigned long memory_map_size,
                            unsigned long descriptor_size,
                            u32 descriptor_version,
                            efi_memory_desc_t *virtual_map)
{
        efi_status_t status;
        unsigned long flags;

        if (efi_is_mixed())
                return efi_thunk_set_virtual_address_map(memory_map_size,
                                                         descriptor_size,
                                                         descriptor_version,
                                                         virtual_map);
        efi_switch_mm(&efi_mm);

        efi_fpu_begin();

        /* Disable interrupts around EFI calls: */
        local_irq_save(flags);
        status = efi_call(efi.systab->runtime->set_virtual_address_map,
                          memory_map_size, descriptor_size,
                          descriptor_version, virtual_map);
        local_irq_restore(flags);

        efi_fpu_end();

        efi_switch_mm(efi_scratch.prev_mm);

        return status;
}