linux/arch/x86/include/asm/efi.h
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

#include <asm/fpu/api.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 *
 * This flag is used in conjunction with a chicken bit called
 * "efi=old_map" which can be used as a fallback to the old runtime
 * services mapping method in case there's some b0rkage with a
 * particular EFI implementation (haha, it is hard to hold up the
 * sarcasm here...).
 */
#define EFI_OLD_MEMMAP          EFI_ARCH_1
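
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * arch setup code tests this flag with efi_enabled() to pick between the
 * legacy ioremap-based mapping and the stable VA scheme, roughly:
 *
 *        if (efi_enabled(EFI_OLD_MEMMAP))
 *                old_map_region(md);     (legacy mapping path)
 *        else
 *                ...map md at its stable -4G-down virtual address...
 */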

#define EFI32_LOADER_SIGNATURE  "EL32"
#define EFI64_LOADER_SIGNATURE  "EL64"

#define MAX_CMDLINE_ADDRESS     UINT_MAX

#ifdef CONFIG_X86_32

#define EFI_LOADER_SIGNATURE    "EL32"

extern unsigned long asmlinkage efi_call_phys(void *, ...);

/*
 * Wrap all the virtual calls in a way that forces the parameters on the stack.
 */

/* Use this macro if your virtual call returns a non-void value */
#define efi_call_virt(f, args...) \
({                                                                      \
        efi_status_t __s;                                               \
        kernel_fpu_begin();                                             \
        __s = ((efi_##f##_t __attribute__((regparm(0)))*)               \
                efi.systab->runtime->f)(args);                          \
        kernel_fpu_end();                                               \
        __s;                                                            \
})

/* Use this macro if your virtual call does not return any value */
#define __efi_call_virt(f, args...) \
({                                                                      \
        kernel_fpu_begin();                                             \
        ((efi_##f##_t __attribute__((regparm(0)))*)                     \
                efi.systab->runtime->f)(args);                          \
        kernel_fpu_end();                                               \
})
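
/*
 * Usage sketch (illustrative, not part of the original header): callers
 * name the runtime service member and pass its arguments, e.g.
 *
 *        status = efi_call_virt(get_time, tm, tc);
 *        __efi_call_virt(reset_system, EFI_RESET_COLD, EFI_SUCCESS, 0, NULL);
 *
 * where tm and tc are efi_time_t / efi_time_cap_t pointers prepared by
 * the caller.
 */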

#define efi_ioremap(addr, size, type, attr)     ioremap_cache(addr, size)

#else /* !CONFIG_X86_32 */

#define EFI_LOADER_SIGNATURE    "EL64"

extern u64 asmlinkage efi_call(void *fp, ...);

#define efi_call_phys(f, args...)               efi_call((f), args)

/*
 * Scratch space used for switching the pagetable in the EFI stub
 */
struct efi_scratch {
        u64     r15;
        u64     prev_cr3;
        pgd_t   *efi_pgt;
        bool    use_pgd;
        u64     phys_stack;
} __packed;

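/*
 * The 64-bit efi_call_virt() syncs the low kernel mappings, disables
 * preemption and saves FPU state, optionally switches CR3 to the
 * dedicated EFI page table (when efi_scratch.use_pgd is set), makes the
 * runtime call, and then restores CR3, FPU state and preemption before
 * returning the status.
 */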
#define efi_call_virt(f, ...)                                           \
({                                                                      \
        efi_status_t __s;                                               \
                                                                        \
        efi_sync_low_kernel_mappings();                                 \
        preempt_disable();                                              \
        __kernel_fpu_begin();                                           \
                                                                        \
        if (efi_scratch.use_pgd) {                                      \
                efi_scratch.prev_cr3 = read_cr3();                      \
                write_cr3((unsigned long)efi_scratch.efi_pgt);          \
                __flush_tlb_all();                                      \
        }                                                               \
                                                                        \
        __s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);    \
                                                                        \
        if (efi_scratch.use_pgd) {                                      \
                write_cr3(efi_scratch.prev_cr3);                        \
                __flush_tlb_all();                                      \
        }                                                               \
                                                                        \
        __kernel_fpu_end();                                             \
        preempt_enable();                                               \
        __s;                                                            \
})

/*
 * All X86_64 virt calls return non-void values. Thus, use non-void call for
 * virt calls that would be void on X86_32.
 */
#define __efi_call_virt(f, args...) efi_call_virt(f, args)

extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
                                        u32 type, u64 attribute);

#ifdef CONFIG_KASAN
/*
 * CONFIG_KASAN may redefine memset to __memset.  The __memset function is
 * present only in the kernel binary.  Since the EFI stub is linked into a
 * separate binary, it doesn't have __memset().  So we should use the
 * standard memset from arch/x86/boot/compressed/string.c.  The same applies
 * to memcpy and memmove.
 */
#undef memcpy
#undef memset
#undef memmove
#endif

#endif /* CONFIG_X86_32 */

extern struct efi_scratch efi_scratch;
extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
extern pgd_t * __init efi_call_phys_prolog(void);
extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
extern void __init efi_print_memmap(void);
extern void __init efi_unmap_memmap(void);
extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_update_mappings(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);

struct efi_setup_data {
        u64 fw_vendor;
        u64 runtime;
        u64 tables;
        u64 smbios;
        u64 reserved[8];
};

extern u64 efi_setup;

#ifdef CONFIG_EFI

static inline bool efi_is_native(void)
{
        return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
}

static inline bool efi_runtime_supported(void)
{
        if (efi_is_native())
                return true;

        if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
                return true;

        return false;
}

extern struct console early_efi_console;
extern void parse_efi_setup(u64 phys_addr, u32 data_len);

#ifdef CONFIG_EFI_MIXED
extern void efi_thunk_runtime_setup(void);
extern efi_status_t efi_thunk_set_virtual_address_map(
        void *phys_set_virtual_address_map,
        unsigned long memory_map_size,
        unsigned long descriptor_size,
        u32 descriptor_version,
        efi_memory_desc_t *virtual_map);
#else
static inline void efi_thunk_runtime_setup(void) {}
static inline efi_status_t efi_thunk_set_virtual_address_map(
        void *phys_set_virtual_address_map,
        unsigned long memory_map_size,
        unsigned long descriptor_size,
        u32 descriptor_version,
        efi_memory_desc_t *virtual_map)
{
        return EFI_SUCCESS;
}
#endif /* CONFIG_EFI_MIXED */


/* arch specific definitions used by the stub code */

struct efi_config {
        u64 image_handle;
        u64 table;
        u64 allocate_pool;
        u64 allocate_pages;
        u64 get_memory_map;
        u64 free_pool;
        u64 free_pages;
        u64 locate_handle;
        u64 handle_protocol;
        u64 exit_boot_services;
        u64 text_output;
        efi_status_t (*call)(unsigned long, ...);
        bool is64;
} __packed;

__pure const struct efi_config *__efi_early(void);

#define efi_call_early(f, ...)                                          \
        __efi_early()->call(__efi_early()->f, __VA_ARGS__);
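
/*
 * Usage sketch (illustrative, not part of the original header): the boot
 * stub calls firmware boot services through the function pointers captured
 * in struct efi_config, e.g.
 *
 *        status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
 *                                size, (void **)&buf);
 *
 * so the same stub code can dispatch via ->call() to either 32-bit or
 * 64-bit (mixed mode) firmware.
 */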

extern bool efi_reboot_required(void);

#else
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
{
        return false;
}
#endif /* CONFIG_EFI */

#endif /* _ASM_X86_EFI_H */