linux/arch/x86/include/asm/kexec.h
#ifndef _ASM_X86_KEXEC_H
#define _ASM_X86_KEXEC_H

#ifdef CONFIG_X86_32
# define PA_CONTROL_PAGE        0
# define VA_CONTROL_PAGE        1
# define PA_PGD                 2
# define PA_SWAP_PAGE           3
# define PAGES_NR               4
#else
# define PA_CONTROL_PAGE        0
# define VA_CONTROL_PAGE        1
# define PA_TABLE_PAGE          2
# define PA_SWAP_PAGE           3
# define PAGES_NR               4
#endif
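
/*
 * These indices select slots in the page_list array that machine_kexec()
 * hands to relocate_kernel().  A hedged sketch of how the 64-bit side
 * fills it (paraphrased from arch/x86/kernel/machine_kexec_64.c; exact
 * details vary between kernel versions):
 *
 *      unsigned long page_list[PAGES_NR];
 *      void *control_page = page_address(image->control_code_page)
 *                           + PAGE_SIZE;
 *
 *      page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
 *      page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
 *      page_list[PA_TABLE_PAGE] =
 *              (unsigned long)__pa(page_address(image->control_code_page));
 *      if (image->type == KEXEC_TYPE_DEFAULT)
 *              page_list[PA_SWAP_PAGE] =
 *                      page_to_pfn(image->swap_page) << PAGE_SHIFT;
 */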

# define KEXEC_CONTROL_CODE_MAX_SIZE    2048

#ifndef __ASSEMBLY__

#include <linux/string.h>

#include <asm/page.h>
#include <asm/ptrace.h>

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the highest page get_free_page can return,
 * i.e. the highest page that is mapped directly into kernel memory so
 * that kmap is not required.
 *
 * So far x86_64 is limited to 40 physical address bits.
 */
#ifdef CONFIG_X86_32
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

# define KEXEC_CONTROL_PAGE_SIZE        4096

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from a 64-bit kernel. */
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT      (MAXMEM-1)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT     (MAXMEM-1)

/* Allocate one page for the pdp and one for the code */
# define KEXEC_CONTROL_PAGE_SIZE  (4096UL + 4096UL)

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
#endif
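
/*
 * The generic kexec code uses these limits when validating the segments
 * of a loaded image.  A minimal sketch of the kind of check done in
 * sanity_check_segment_list() in kernel/kexec.c (paraphrased; not the
 * literal code):
 *
 *      unsigned long mstart = image->segment[i].mem;
 *      unsigned long mend   = mstart + image->segment[i].memsz;
 *
 *      if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
 *              return -EADDRNOTAVAIL;
 */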

/*
 * The CPU does not save ss and sp on the stack if execution is already
 * running in kernel mode at the time of the NMI occurrence.  This code
 * fixes those two fields up.
 */
static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
                                      struct pt_regs *oldregs)
{
#ifdef CONFIG_X86_32
        /*
         * For a same-privilege exception the hardware frame stops after
         * flags, so the pre-exception esp is exactly the address where
         * sp would have been pushed, i.e. &oldregs->sp itself.
         */
        newregs->sp = (unsigned long)&(oldregs->sp);
        asm volatile("xorl %%eax, %%eax\n\t"
                     "movw %%ss, %%ax\n\t"
                     :"=a"(newregs->ss));
#endif
}

/*
 * Capture the register state if we are coming in via panic; otherwise
 * only fix up ss and sp, since we arrived via a kernel-mode exception
 * and oldregs already holds the remaining registers.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
                                    struct pt_regs *oldregs)
{
        if (oldregs) {
                memcpy(newregs, oldregs, sizeof(*newregs));
                crash_fixup_ss_esp(newregs, oldregs);
        } else {
#ifdef CONFIG_X86_32
                asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
                asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
                asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
                asm volatile("movl %%esi,%0" : "=m"(newregs->si));
                asm volatile("movl %%edi,%0" : "=m"(newregs->di));
                asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
                asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
                asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
                asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
                asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
                asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
                asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
                asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
#else
                asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
                asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
                asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
                asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
                asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
                asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
                asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
                asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
                asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
                asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
                asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
                asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
                asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
                asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
                asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
                asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
                asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
                asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
                asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
#endif
                newregs->ip = (unsigned long)current_text_addr();
        }
}
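
/*
 * A hedged sketch of the caller's side: the crash path builds a private
 * pt_regs before handing control to the crash kernel (paraphrased from
 * crash_kexec() in kernel/kexec.c):
 *
 *      struct pt_regs fixed_regs;
 *
 *      crash_setup_regs(&fixed_regs, regs);
 *      crash_save_vmcoreinfo();
 *      machine_crash_shutdown(&fixed_regs);
 *      machine_kexec(kexec_crash_image);
 */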

#ifdef CONFIG_X86_32
asmlinkage unsigned long
relocate_kernel(unsigned long indirection_page,
                unsigned long control_page,
                unsigned long start_address,
                unsigned int has_pae,
                unsigned int preserve_context);
#else
unsigned long
relocate_kernel(unsigned long indirection_page,
                unsigned long page_list,
                unsigned long start_address,
                unsigned int preserve_context);
#endif
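
/*
 * A sketch of the 64-bit call site (paraphrased from machine_kexec() in
 * arch/x86/kernel/machine_kexec_64.c): relocate_kernel() runs from the
 * control page, copies the new kernel into place via the indirection
 * page, and its return value becomes the new entry point:
 *
 *      image->start = relocate_kernel((unsigned long)image->head,
 *                                     (unsigned long)page_list,
 *                                     image->start,
 *                                     image->preserve_context);
 */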

#define ARCH_HAS_KIMAGE_ARCH

/*
 * Arch-private page-table pages used to map the control page during
 * relocation; freed again in machine_kexec_cleanup().
 */
#ifdef CONFIG_X86_32
struct kimage_arch {
        pgd_t *pgd;
#ifdef CONFIG_X86_PAE
        pmd_t *pmd0;
        pmd_t *pmd1;
#endif
        pte_t *pte0;
        pte_t *pte1;
};
#else
struct kimage_arch {
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
};
#endif
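
/*
 * A hedged sketch of how these pages are set up on 64-bit (paraphrased
 * from init_transition_pgtable() in arch/x86/kernel/machine_kexec_64.c;
 * flags and helpers vary by kernel version):
 *
 *      pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
 *      if (!pud)
 *              return -ENOMEM;
 *      image->arch.pud = pud;
 *      set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
 *      ...
 *      set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
 */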

/*
 * Hook used by kvm-intel to VMCLEAR any loaded VMCSs in the crash path,
 * so their contents are flushed to memory before the dump is taken.
 */
typedef void crash_vmclear_fn(void);
extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
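
/*
 * A sketch of both sides of the hook (paraphrased from arch/x86/kvm/vmx.c
 * and arch/x86/kernel/crash.c):
 *
 *      registration, in the VMX module init path:
 *
 *      rcu_assign_pointer(crash_vmclear_loaded_vmcss,
 *                         crash_vmclear_local_loaded_vmcss);
 *
 *      invocation, in the per-CPU crash path:
 *
 *      crash_vmclear_fn *do_vmclear;
 *
 *      rcu_read_lock();
 *      do_vmclear = rcu_dereference(crash_vmclear_loaded_vmcss);
 *      if (do_vmclear)
 *              do_vmclear();
 *      rcu_read_unlock();
 */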

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_KEXEC_H */