linux/arch/x86/include/asm/kexec.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KEXEC_H
#define _ASM_X86_KEXEC_H

#ifdef CONFIG_X86_32
# define PA_CONTROL_PAGE        0
# define VA_CONTROL_PAGE        1
# define PA_PGD                 2
# define PA_SWAP_PAGE           3
# define PAGES_NR               4
#else
# define PA_CONTROL_PAGE        0
# define VA_CONTROL_PAGE        1
# define PA_TABLE_PAGE          2
# define PA_SWAP_PAGE           3
# define PAGES_NR               4
#endif

# define KEXEC_CONTROL_CODE_MAX_SIZE    2048

#ifndef __ASSEMBLY__

#include <linux/string.h>
#include <linux/kernel.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/bootparam.h>

struct kimage;

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the highest page get_free_page can return,
 * i.e. the highest page that is mapped directly into kernel memory so
 * that kmap is not required.
 *
 * So far x86_64 is limited to 40 physical address bits.
 */
#ifdef CONFIG_X86_32
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

# define KEXEC_CONTROL_PAGE_SIZE        4096

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from 64 bit kernel. */
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT      (MAXMEM-1)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT     (MAXMEM-1)

/* Allocate one page for the pdp and the second for the code */
# define KEXEC_CONTROL_PAGE_SIZE  (4096UL + 4096UL)

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
#endif

/*
 * This function captures the register state when coming in via panic;
 * if pt_regs were already saved by a kernel-mode exception, those are
 * simply copied verbatim instead.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
                                    struct pt_regs *oldregs)
{
        if (oldregs) {
                memcpy(newregs, oldregs, sizeof(*newregs));
        } else {
                /* Snapshot the general-purpose registers one at a time. */
#ifdef CONFIG_X86_32
                asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
                asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
                asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
                asm volatile("movl %%esi,%0" : "=m"(newregs->si));
                asm volatile("movl %%edi,%0" : "=m"(newregs->di));
                asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
                asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
                asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
                /* Segment registers and flags. */
                asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
                asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
                asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
                asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
                asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
#else
                asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
                asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
                asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
                asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
                asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
                asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
                asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
                asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
                asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
                asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
                asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
                asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
                asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
                asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
                asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
                asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
                /* Segment registers and flags. */
                asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
                asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
                asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
#endif
                newregs->ip = _THIS_IP_;
        }
}

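/*
 * Illustrative usage only (modeled on the x86 crash shutdown path, not
 * part of this header): callers pass the exception pt_regs when one is
 * available, and NULL when coming straight from panic(), so that the
 * current register state is captured instead:
 *
 *	struct pt_regs fixed_regs;
 *
 *	crash_setup_regs(&fixed_regs, regs);
 *	crash_save_cpu(&fixed_regs, smp_processor_id());
 */
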
#ifdef CONFIG_X86_32
asmlinkage unsigned long
relocate_kernel(unsigned long indirection_page,
                unsigned long control_page,
                unsigned long start_address,
                unsigned int has_pae,
                unsigned int preserve_context);
#else
unsigned long
relocate_kernel(unsigned long indirection_page,
                unsigned long page_list,
                unsigned long start_address,
                unsigned int preserve_context,
                unsigned int sme_active);
#endif

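/*
 * Sketch, for illustration only (based on arch/x86/kernel/machine_kexec_64.c):
 * machine_kexec() fills a page_list[] indexed by the PA_* and VA_* constants
 * defined at the top of this header before jumping to relocate_kernel():
 *
 *	unsigned long page_list[PAGES_NR];
 *
 *	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
 *	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
 *	page_list[PA_TABLE_PAGE] = ...;    (root of the transition page table)
 *	page_list[PA_SWAP_PAGE] = ...;     (only used when preserve_context is set)
 */
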
#define ARCH_HAS_KIMAGE_ARCH

#ifdef CONFIG_X86_32
struct kimage_arch {
        /* Page-table pages used to map the control page during relocation. */
        pgd_t *pgd;
#ifdef CONFIG_X86_PAE
        pmd_t *pmd0;
        pmd_t *pmd1;
#endif
        pte_t *pte0;
        pte_t *pte1;
};
#else
struct kimage_arch {
        /* Page-table pages used to map the control page during relocation. */
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /* Core ELF header buffer */
        void *elf_headers;
        unsigned long elf_headers_sz;
        unsigned long elf_load_addr;
};
#endif /* CONFIG_X86_32 */

#ifdef CONFIG_X86_64
/*
 * The number and order of elements in this structure must match the ones
 * in arch/x86/purgatory/entry64.S. If you make a change here, make the
 * corresponding change in purgatory too.
 */
struct kexec_entry64_regs {
        uint64_t rax;
        uint64_t rcx;
        uint64_t rdx;
        uint64_t rbx;
        uint64_t rsp;
        uint64_t rbp;
        uint64_t rsi;
        uint64_t rdi;
        uint64_t r8;
        uint64_t r9;
        uint64_t r10;
        uint64_t r11;
        uint64_t r12;
        uint64_t r13;
        uint64_t r14;
        uint64_t r15;
        uint64_t rip;
};

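/*
 * Sketch, for illustration only (modeled on the kexec_file_load() path in
 * arch/x86/kernel/kexec-bzimage64.c; the load-address variables here are
 * hypothetical): the loader patches this structure into the purgatory blob
 * by symbol name before rebooting into the new kernel:
 *
 *	struct kexec_entry64_regs regs64;
 *
 *	memset(&regs64, 0, sizeof(regs64));
 *	regs64.rsi = bootparam_load_addr;
 *	regs64.rip = kernel_load_addr + 0x200;
 *	regs64.rsp = stack_top;
 *	ret = kexec_purgatory_get_set_symbol(image, "entry64_regs", &regs64,
 *					     sizeof(regs64), 0);
 */
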
/*
 * With SME active, these hooks map the control pages decrypted and
 * restore encryption before the pages are freed.
 */
extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
                                       gfp_t gfp);
#define arch_kexec_post_alloc_pages arch_kexec_post_alloc_pages

extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages

#endif

typedef void crash_vmclear_fn(void);
extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
extern void kdump_nmi_shootdown_cpus(void);

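/*
 * Sketch, for illustration only: a hypervisor module such as KVM can hook
 * crash_vmclear_loaded_vmcss so loaded VMCSs are flushed before the crash
 * kernel boots; something like the following, where the callback name is
 * hypothetical:
 *
 *	static void vmclear_local_vmcss(void);
 *
 *	rcu_assign_pointer(crash_vmclear_loaded_vmcss, vmclear_local_vmcss);
 */
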
#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_KEXEC_H */