linux/arch/x86/platform/efi/efi_64.c
// SPDX-License-Identifier: GPL-2.0
/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *      Fenghua Yu <fenghua.yu@intel.com>
 *      Bibo Mao <bibo.mao@intel.com>
 *      Chandramouli Narayanan <mouli@linux.intel.com>
 *      Huang Ying <ying.huang@intel.com>
 *
 * Code to convert EFI to E820 map has been implemented in the elilo bootloader
 * based on an EFI patch by Edgar Hucek. Based on the E820 map, the page table
 * is set up appropriately for EFI runtime code.
 * - mouli 06/14/2007.
 *
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>
#include <linux/mem_encrypt.h>
#include <linux/sched/task.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820/api.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/sev-es.h>

/*
 * We allocate runtime services regions top-down, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
 */
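/*
 * (For reference: at the time of writing, pgtable_64_types.h defines
 * EFI_VA_START as -4G and EFI_VA_END as -68G, i.e. a 64G window whose top
 * is 0xffff_ffff_0000_0000.)
 */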
static u64 efi_va = EFI_VA_START;

struct efi_scratch efi_scratch;

EXPORT_SYMBOL_GPL(efi_mm);

/*
 * We need our own copy of the higher levels of the page tables
 * because we want to avoid inserting EFI region mappings (EFI_VA_END
 * to EFI_VA_START) into the standard kernel page tables. Everything
 * else can be shared, see efi_sync_low_kernel_mappings().
 *
 * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the
 * allocation.
 */
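/*
 * (PGD_ALLOCATION_ORDER is 1 when CONFIG_PAGE_TABLE_ISOLATION is enabled
 * and 0 otherwise, so the allocation below matches the size pgd_alloc()
 * would hand out.)
 */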
int __init efi_alloc_page_tables(void)
{
        pgd_t *pgd, *efi_pgd;
        p4d_t *p4d;
        pud_t *pud;
        gfp_t gfp_mask;

        gfp_mask = GFP_KERNEL | __GFP_ZERO;
        efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
        if (!efi_pgd)
                return -ENOMEM;

        pgd = efi_pgd + pgd_index(EFI_VA_END);
        p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
        if (!p4d) {
                free_page((unsigned long)efi_pgd);
                return -ENOMEM;
        }

        pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
        if (!pud) {
                if (pgtable_l5_enabled())
                        free_page((unsigned long) pgd_page_vaddr(*pgd));
                free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
                return -ENOMEM;
        }

        efi_mm.pgd = efi_pgd;
        mm_init_cpumask(&efi_mm);
        init_new_context(NULL, &efi_mm);

        return 0;
}

/*
 * Add low kernel mappings for passing arguments to EFI functions.
 */
void efi_sync_low_kernel_mappings(void)
{
        unsigned num_entries;
        pgd_t *pgd_k, *pgd_efi;
        p4d_t *p4d_k, *p4d_efi;
        pud_t *pud_k, *pud_efi;
        pgd_t *efi_pgd = efi_mm.pgd;

        pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
        pgd_k = pgd_offset_k(PAGE_OFFSET);

        num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
        memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);

        pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
        pgd_k = pgd_offset_k(EFI_VA_END);
        p4d_efi = p4d_offset(pgd_efi, 0);
        p4d_k = p4d_offset(pgd_k, 0);

        num_entries = p4d_index(EFI_VA_END);
        memcpy(p4d_efi, p4d_k, sizeof(p4d_t) * num_entries);

        /*
         * We share all the PUD entries apart from those that map the
         * EFI regions. Copy around them.
         */
        BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
        BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);

        p4d_efi = p4d_offset(pgd_efi, EFI_VA_END);
        p4d_k = p4d_offset(pgd_k, EFI_VA_END);
        pud_efi = pud_offset(p4d_efi, 0);
        pud_k = pud_offset(p4d_k, 0);

        num_entries = pud_index(EFI_VA_END);
        memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);

        pud_efi = pud_offset(p4d_efi, EFI_VA_START);
        pud_k = pud_offset(p4d_k, EFI_VA_START);

        num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
        memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}

/*
 * Wrapper for slow_virt_to_phys() that handles NULL addresses.
 */
static inline phys_addr_t
virt_to_phys_or_null_size(void *va, unsigned long size)
{
        bool bad_size;

        if (!va)
                return 0;

        if (virt_addr_valid(va))
                return virt_to_phys(va);

        /*
         * A fully aligned variable on the stack is guaranteed not to
         * cross a page boundary. Try to catch strings on the stack by
         * checking that 'size' is a power of two.
         */
        bad_size = size > PAGE_SIZE || !is_power_of_2(size);

        WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);

        return slow_virt_to_phys(va);
}

#define virt_to_phys_or_null(addr)                              \
        virt_to_phys_or_null_size((addr), sizeof(*(addr)))
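/*
 * Note that virt_to_phys_or_null() derives the size from the pointed-to
 * type, so buffers whose length is not sizeof(*addr) (strings, variable
 * data) must call virt_to_phys_or_null_size() with the real length instead.
 */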

int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
        unsigned long pfn, text, pf;
        struct page *page;
        unsigned npages;
        pgd_t *pgd = efi_mm.pgd;

        /*
         * It can happen that the physical address of new_memmap lands in memory
         * which is not mapped in the EFI page table. Therefore we need to go
         * and ident-map those pages containing the map before calling
         * phys_efi_set_virtual_address_map().
         */
        pfn = pa_memmap >> PAGE_SHIFT;
        pf = _PAGE_NX | _PAGE_RW | _PAGE_ENC;
        if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
                pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
                return 1;
        }

        /*
         * Certain firmware versions are way too sentimental and still believe
         * they are exclusive and unquestionable owners of the first physical page,
         * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
         * (but then write-access it later during SetVirtualAddressMap()).
         *
         * Create a 1:1 mapping for this page, to avoid triple faults during early
         * boot with such firmware. We are free to hand this page to the BIOS,
         * as trim_bios_range() will reserve the first page and isolate it away
         * from memory allocators anyway.
         */
        pf = _PAGE_RW;
        if (sev_active())
                pf |= _PAGE_ENC;

        if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
                pr_err("Failed to create 1:1 mapping for the first page!\n");
                return 1;
        }

        /*
         * When SEV-ES is active, the GHCB as set by the kernel will be used
         * by firmware. Create a 1:1 unencrypted mapping for each GHCB.
         */
        if (sev_es_efi_map_ghcbs(pgd)) {
                pr_err("Failed to create 1:1 mapping for the GHCBs!\n");
                return 1;
        }

        /*
         * When making calls to the firmware everything needs to be 1:1
         * mapped and addressable with 32-bit pointers. Map the kernel
         * text and allocate a new stack because we can't rely on the
         * stack pointer being < 4GB.
         */
        if (!efi_is_mixed())
                return 0;

        page = alloc_page(GFP_KERNEL|__GFP_DMA32);
        if (!page)
                panic("Unable to allocate EFI runtime stack < 4GB\n");

        efi_scratch.phys_stack = virt_to_phys(page_address(page));
        efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */
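        /*
         * (efi_scratch.phys_stack is picked up by the mixed-mode entry code,
         * which switches onto this low stack before calling into the 32-bit
         * firmware.)
         */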

        npages = (_etext - _text) >> PAGE_SHIFT;
        text = __pa(_text);
        pfn = text >> PAGE_SHIFT;

        pf = _PAGE_RW | _PAGE_ENC;
        if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, pf)) {
                pr_err("Failed to map kernel text 1:1\n");
                return 1;
        }

        return 0;
}

static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
        unsigned long flags = _PAGE_RW;
        unsigned long pfn;
        pgd_t *pgd = efi_mm.pgd;

        if (!(md->attribute & EFI_MEMORY_WB))
                flags |= _PAGE_PCD;

        if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO)
                flags |= _PAGE_ENC;

        pfn = md->phys_addr >> PAGE_SHIFT;
        if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
                pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
                           md->phys_addr, va);
}

void __init efi_map_region(efi_memory_desc_t *md)
{
        unsigned long size = md->num_pages << PAGE_SHIFT;
        u64 pa = md->phys_addr;

        /*
         * Make sure the 1:1 mappings are present as a catch-all for b0rked
         * firmware which doesn't update all internal pointers after switching
         * to virtual mode and would otherwise crap on us.
         */
        __map_region(md, md->phys_addr);

        /*
         * Enforce the 1:1 mapping as the default virtual address when
         * booting in EFI mixed mode, because even though we may be
         * running a 64-bit kernel, the firmware may only be 32-bit.
         */
        if (efi_is_mixed()) {
                md->virt_addr = md->phys_addr;
                return;
        }

        efi_va -= size;

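        /*
         * Keep the VA's offset within a 2M page identical to the PA's
         * offset, so that the mapping code can still use 2M pages even for
         * regions whose physical start isn't 2M-aligned.
         */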
        /* Is PA 2M-aligned? */
        if (!(pa & (PMD_SIZE - 1))) {
                efi_va &= PMD_MASK;
        } else {
                u64 pa_offset = pa & (PMD_SIZE - 1);
                u64 prev_va = efi_va;

                /* get us the same offset within this 2M page */
                efi_va = (efi_va & PMD_MASK) + pa_offset;

                if (efi_va > prev_va)
                        efi_va -= PMD_SIZE;
        }

        if (efi_va < EFI_VA_END) {
                pr_warn(FW_WARN "VA address range overflow!\n");
                return;
        }

        /* Do the VA map */
        __map_region(md, efi_va);
        md->virt_addr = efi_va;
}

/*
 * The kexec kernel will use efi_map_region_fixed() to map EFI runtime memory
 * ranges. md->virt_addr is the original virtual address which had already
 * been mapped in the first kernel.
 */
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
        __map_region(md, md->phys_addr);
        __map_region(md, md->virt_addr);
}

void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
        efi_setup = phys_addr + sizeof(struct setup_data);
}

static int __init efi_update_mappings(efi_memory_desc_t *md, unsigned long pf)
{
        unsigned long pfn;
        pgd_t *pgd = efi_mm.pgd;
        int err1, err2;

        /* Update the 1:1 mapping */
        pfn = md->phys_addr >> PAGE_SHIFT;
        err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
        if (err1) {
                pr_err("Error while updating 1:1 mapping PA 0x%llx -> VA 0x%llx!\n",
                           md->phys_addr, md->virt_addr);
        }

        err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
        if (err2) {
                pr_err("Error while updating VA mapping PA 0x%llx -> VA 0x%llx!\n",
                           md->phys_addr, md->virt_addr);
        }

        return err1 || err2;
}

static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md)
{
        unsigned long pf = 0;

        if (md->attribute & EFI_MEMORY_XP)
                pf |= _PAGE_NX;

        if (!(md->attribute & EFI_MEMORY_RO))
                pf |= _PAGE_RW;

        if (sev_active())
                pf |= _PAGE_ENC;

        return efi_update_mappings(md, pf);
}

void __init efi_runtime_update_mappings(void)
{
        efi_memory_desc_t *md;

        /*
         * Use the EFI Memory Attribute Table for mapping permissions if it
         * exists, since it is intended to supersede EFI_PROPERTIES_TABLE.
         */
        if (efi_enabled(EFI_MEM_ATTR)) {
                efi_memattr_apply_permissions(NULL, efi_update_mem_attr);
                return;
        }

        /*
         * EFI_MEMORY_ATTRIBUTES_TABLE is intended to replace
         * EFI_PROPERTIES_TABLE. So, use EFI_PROPERTIES_TABLE to update
         * permissions only if EFI_MEMORY_ATTRIBUTES_TABLE is not
         * published by the firmware. Even if we find a buggy implementation
         * of EFI_MEMORY_ATTRIBUTES_TABLE, don't fall back to
         * EFI_PROPERTIES_TABLE, for the same reason.
         */

        if (!efi_enabled(EFI_NX_PE_DATA))
                return;

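        /*
         * No memory attributes table: fall back to the type-based policy
         * derived from EFI_PROPERTIES_TABLE, making runtime services data
         * non-executable and leaving runtime services code read-only.
         */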
        for_each_efi_memory_desc(md) {
                unsigned long pf = 0;

                if (!(md->attribute & EFI_MEMORY_RUNTIME))
                        continue;

                if (!(md->attribute & EFI_MEMORY_WB))
                        pf |= _PAGE_PCD;

                if ((md->attribute & EFI_MEMORY_XP) ||
                        (md->type == EFI_RUNTIME_SERVICES_DATA))
                        pf |= _PAGE_NX;

                if (!(md->attribute & EFI_MEMORY_RO) &&
                        (md->type != EFI_RUNTIME_SERVICES_CODE))
                        pf |= _PAGE_RW;

                if (sev_active())
                        pf |= _PAGE_ENC;

                efi_update_mappings(md, pf);
        }
}

void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
        ptdump_walk_pgd_level(NULL, efi_mm.pgd);
#endif
}

/*
 * Makes the calling thread switch to/from efi_mm context. Can be used
 * for SetVirtualAddressMap(), i.e. current->active_mm == init_mm, as well
 * as during EFI runtime calls, i.e. current->active_mm == current_mm.
 * We are not mm_dropping()/mm_grabbing() any mm, because we are not
 * losing/creating any references.
 */
void efi_switch_mm(struct mm_struct *mm)
{
        task_lock(current);
        efi_scratch.prev_mm = current->active_mm;
        current->active_mm = mm;
        switch_mm(efi_scratch.prev_mm, mm, NULL);
        task_unlock(current);
}

extern efi_status_t efi64_thunk(u32, ...);

static DEFINE_SPINLOCK(efi_runtime_lock);

/*
 * DS and ES contain user values.  We need to save them.
 * The 32-bit EFI code needs a valid DS, ES, and SS.  There's no
 * need to save the old SS: __KERNEL_DS is always acceptable.
 */
#define __efi_thunk(func, ...)                                          \
({                                                                      \
        efi_runtime_services_32_t *__rt;                                \
        unsigned short __ds, __es;                                      \
        efi_status_t ____s;                                             \
                                                                        \
        __rt = (void *)(unsigned long)efi.systab->mixed_mode.runtime;   \
                                                                        \
        savesegment(ds, __ds);                                          \
        savesegment(es, __es);                                          \
                                                                        \
        loadsegment(ss, __KERNEL_DS);                                   \
        loadsegment(ds, __KERNEL_DS);                                   \
        loadsegment(es, __KERNEL_DS);                                   \
                                                                        \
        ____s = efi64_thunk(__rt->func, __VA_ARGS__);                   \
                                                                        \
        loadsegment(ds, __ds);                                          \
        loadsegment(es, __es);                                          \
                                                                        \
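        /* Fold the 32-bit error flag (bit 31) into the 64-bit one (bit 63). */ \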
        ____s ^= (____s & BIT(31)) | (____s & BIT_ULL(31)) << 32;       \
        ____s;                                                          \
})

/*
 * Switch to the EFI page tables early so that we can access the 1:1
 * runtime services mappings which are not mapped in any other page
 * tables.
 *
 * Also, disable interrupts because the IDT points to 64-bit handlers,
 * which aren't going to function correctly when we switch to 32-bit.
 */
#define efi_thunk(func...)                                              \
({                                                                      \
        efi_status_t __s;                                               \
                                                                        \
        arch_efi_call_virt_setup();                                     \
                                                                        \
        __s = __efi_thunk(func);                                        \
                                                                        \
        arch_efi_call_virt_teardown();                                  \
                                                                        \
        __s;                                                            \
})

static efi_status_t __init __no_sanitize_address
efi_thunk_set_virtual_address_map(unsigned long memory_map_size,
                                  unsigned long descriptor_size,
                                  u32 descriptor_version,
                                  efi_memory_desc_t *virtual_map)
{
        efi_status_t status;
        unsigned long flags;

        efi_sync_low_kernel_mappings();
        local_irq_save(flags);

        efi_switch_mm(&efi_mm);

        status = __efi_thunk(set_virtual_address_map, memory_map_size,
                             descriptor_size, descriptor_version, virtual_map);

        efi_switch_mm(efi_scratch.prev_mm);
        local_irq_restore(flags);

        return status;
}

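/*
 * The EFI time services below also take rtc_lock: on many x86 systems the
 * firmware implements them on top of the same CMOS RTC that the kernel's
 * RTC code drives, so the two must not race.
 */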
static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
        efi_status_t status;
        u32 phys_tm, phys_tc;
        unsigned long flags;

        spin_lock(&rtc_lock);
        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_tm = virt_to_phys_or_null(tm);
        phys_tc = virt_to_phys_or_null(tc);

        status = efi_thunk(get_time, phys_tm, phys_tc);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);

        return status;
}

static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
        efi_status_t status;
        u32 phys_tm;
        unsigned long flags;

        spin_lock(&rtc_lock);
        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_tm = virt_to_phys_or_null(tm);

        status = efi_thunk(set_time, phys_tm);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);

        return status;
}

static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
                          efi_time_t *tm)
{
        efi_status_t status;
        u32 phys_enabled, phys_pending, phys_tm;
        unsigned long flags;

        spin_lock(&rtc_lock);
        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_enabled = virt_to_phys_or_null(enabled);
        phys_pending = virt_to_phys_or_null(pending);
        phys_tm = virt_to_phys_or_null(tm);

        status = efi_thunk(get_wakeup_time, phys_enabled,
                             phys_pending, phys_tm);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);

        return status;
}

static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
        efi_status_t status;
        u32 phys_tm;
        unsigned long flags;

        spin_lock(&rtc_lock);
        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_tm = virt_to_phys_or_null(tm);

        status = efi_thunk(set_wakeup_time, enabled, phys_tm);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);

        return status;
}

static unsigned long efi_name_size(efi_char16_t *name)
{
        return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
}

static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
                       u32 *attr, unsigned long *data_size, void *data)
{
        efi_status_t status;
        u32 phys_name, phys_vendor, phys_attr;
        u32 phys_data_size, phys_data;
        unsigned long flags;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_data_size = virt_to_phys_or_null(data_size);
        phys_vendor = virt_to_phys_or_null(vendor);
        phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
        phys_attr = virt_to_phys_or_null(attr);
        phys_data = virt_to_phys_or_null_size(data, *data_size);

        status = efi_thunk(get_variable, phys_name, phys_vendor,
                           phys_attr, phys_data_size, phys_data);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
                       u32 attr, unsigned long data_size, void *data)
{
        u32 phys_name, phys_vendor, phys_data;
        efi_status_t status;
        unsigned long flags;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
        phys_vendor = virt_to_phys_or_null(vendor);
        phys_data = virt_to_phys_or_null_size(data, data_size);

        /* If data_size is > sizeof(u32) we've got problems */
        status = efi_thunk(set_variable, phys_name, phys_vendor,
                           attr, data_size, phys_data);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static efi_status_t
efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
                                   u32 attr, unsigned long data_size,
                                   void *data)
{
        u32 phys_name, phys_vendor, phys_data;
        efi_status_t status;
        unsigned long flags;

        if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
                return EFI_NOT_READY;

        phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
        phys_vendor = virt_to_phys_or_null(vendor);
        phys_data = virt_to_phys_or_null_size(data, data_size);

        /* If data_size is > sizeof(u32) we've got problems */
        status = efi_thunk(set_variable, phys_name, phys_vendor,
                           attr, data_size, phys_data);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static efi_status_t
efi_thunk_get_next_variable(unsigned long *name_size,
                            efi_char16_t *name,
                            efi_guid_t *vendor)
{
        efi_status_t status;
        u32 phys_name_size, phys_name, phys_vendor;
        unsigned long flags;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_name_size = virt_to_phys_or_null(name_size);
        phys_vendor = virt_to_phys_or_null(vendor);
        phys_name = virt_to_phys_or_null_size(name, *name_size);

        status = efi_thunk(get_next_variable, phys_name_size,
                           phys_name, phys_vendor);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
        efi_status_t status;
        u32 phys_count;
        unsigned long flags;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_count = virt_to_phys_or_null(count);
        status = efi_thunk(get_next_high_mono_count, phys_count);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static void
efi_thunk_reset_system(int reset_type, efi_status_t status,
                       unsigned long data_size, efi_char16_t *data)
{
        u32 phys_data;
        unsigned long flags;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_data = virt_to_phys_or_null_size(data, data_size);

        efi_thunk(reset_system, reset_type, status, data_size, phys_data);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);
}

static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
                         unsigned long count, unsigned long sg_list)
{
        /*
         * To properly support this function we would need to repackage
         * 'capsules' because the firmware doesn't understand 64-bit
         * pointers.
         */
        return EFI_UNSUPPORTED;
}


static efi_status_t
efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
                              u64 *remaining_space,
                              u64 *max_variable_size)
{
        efi_status_t status;
        u32 phys_storage, phys_remaining, phys_max;
        unsigned long flags;

        if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
                return EFI_UNSUPPORTED;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_storage = virt_to_phys_or_null(storage_space);
        phys_remaining = virt_to_phys_or_null(remaining_space);
        phys_max = virt_to_phys_or_null(max_variable_size);

        status = efi_thunk(query_variable_info, attr, phys_storage,
                           phys_remaining, phys_max);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static efi_status_t
efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
                                          u64 *remaining_space,
                                          u64 *max_variable_size)
{
        efi_status_t status;
        u32 phys_storage, phys_remaining, phys_max;
        unsigned long flags;

        if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
                return EFI_UNSUPPORTED;

        if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
                return EFI_NOT_READY;

        phys_storage = virt_to_phys_or_null(storage_space);
        phys_remaining = virt_to_phys_or_null(remaining_space);
        phys_max = virt_to_phys_or_null(max_variable_size);

        status = efi_thunk(query_variable_info, attr, phys_storage,
                           phys_remaining, phys_max);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
                             unsigned long count, u64 *max_size,
                             int *reset_type)
{
        /*
         * To properly support this function we would need to repackage
         * 'capsules' because the firmware doesn't understand 64-bit
         * pointers.
         */
        return EFI_UNSUPPORTED;
}


void __init efi_thunk_runtime_setup(void)
{
        if (!IS_ENABLED(CONFIG_EFI_MIXED))
                return;

        efi.get_time = efi_thunk_get_time;
        efi.set_time = efi_thunk_set_time;
        efi.get_wakeup_time = efi_thunk_get_wakeup_time;
        efi.set_wakeup_time = efi_thunk_set_wakeup_time;
        efi.get_variable = efi_thunk_get_variable;
        efi.get_next_variable = efi_thunk_get_next_variable;
        efi.set_variable = efi_thunk_set_variable;
        efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
        efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
        efi.reset_system = efi_thunk_reset_system;
        efi.query_variable_info = efi_thunk_query_variable_info;
        efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
        efi.update_capsule = efi_thunk_update_capsule;
        efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}

efi_status_t __init __no_sanitize_address
efi_set_virtual_address_map(unsigned long memory_map_size,
                            unsigned long descriptor_size,
                            u32 descriptor_version,
                            efi_memory_desc_t *virtual_map)
{
        efi_status_t status;
        unsigned long flags;

        if (efi_is_mixed())
                return efi_thunk_set_virtual_address_map(memory_map_size,
                                                         descriptor_size,
                                                         descriptor_version,
                                                         virtual_map);
        efi_switch_mm(&efi_mm);

        efi_fpu_begin();

        /* Disable interrupts around EFI calls: */
        local_irq_save(flags);
        status = efi_call(efi.systab->runtime->set_virtual_address_map,
                          memory_map_size, descriptor_size,
                          descriptor_version, virtual_map);
        local_irq_restore(flags);

        efi_fpu_end();

        efi_switch_mm(efi_scratch.prev_mm);

        return status;
}