/* linux/arch/x86/kernel/relocate_kernel_64.S */
/*
 * relocate_kernel.S - put the kernel image in place to boot
 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
   8
   9#include <linux/linkage.h>
  10#include <asm/page_types.h>
  11#include <asm/kexec.h>
  12#include <asm/processor-flags.h>
  13#include <asm/pgtable_types.h>
  14
  15/*
  16 * Must be relocatable PIC code callable as a C function
  17 */
  18
  19#define PTR(x) (x << 3)
  20#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
  21
  22/*
  23 * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
  24 * ~ control_page + PAGE_SIZE are used as data storage and stack for
  25 * jumping back
  26 */
  27#define DATA(offset)            (KEXEC_CONTROL_CODE_MAX_SIZE+(offset))
  28
  29/* Minimal CPU state */
  30#define RSP                     DATA(0x0)
  31#define CR0                     DATA(0x8)
  32#define CR3                     DATA(0x10)
  33#define CR4                     DATA(0x18)
  34
  35/* other data */
  36#define CP_PA_TABLE_PAGE        DATA(0x20)
  37#define CP_PA_SWAP_PAGE         DATA(0x28)
  38#define CP_PA_BACKUP_PAGES_MAP  DATA(0x30)
  39
  40        .text
  41        .align PAGE_SIZE
  42        .code64
  43        .globl relocate_kernel
  44relocate_kernel:
  45        /*
  46         * %rdi indirection_page
  47         * %rsi page_list
  48         * %rdx start address
  49         * %rcx preserve_context
  50         * %r8  sme_active
  51         */
  52
  53        /* Save the CPU context, used for jumping back */
  54        pushq %rbx
  55        pushq %rbp
  56        pushq %r12
  57        pushq %r13
  58        pushq %r14
  59        pushq %r15
  60        pushf
  61
  62        movq    PTR(VA_CONTROL_PAGE)(%rsi), %r11
  63        movq    %rsp, RSP(%r11)
  64        movq    %cr0, %rax
  65        movq    %rax, CR0(%r11)
  66        movq    %cr3, %rax
  67        movq    %rax, CR3(%r11)
  68        movq    %cr4, %rax
  69        movq    %rax, CR4(%r11)
  70
  71        /* Save CR4. Required to enable the right paging mode later. */
  72        movq    %rax, %r13
  73
  74        /* zero out flags, and disable interrupts */
  75        pushq $0
  76        popfq
  77
  78        /* Save SME active flag */
  79        movq    %r8, %r12
  80
  81        /*
  82         * get physical address of control page now
  83         * this is impossible after page table switch
  84         */
  85        movq    PTR(PA_CONTROL_PAGE)(%rsi), %r8
  86
  87        /* get physical address of page table now too */
  88        movq    PTR(PA_TABLE_PAGE)(%rsi), %r9
  89
  90        /* get physical address of swap page now */
  91        movq    PTR(PA_SWAP_PAGE)(%rsi), %r10
  92
  93        /* save some information for jumping back */
  94        movq    %r9, CP_PA_TABLE_PAGE(%r11)
  95        movq    %r10, CP_PA_SWAP_PAGE(%r11)
  96        movq    %rdi, CP_PA_BACKUP_PAGES_MAP(%r11)
  97
  98        /* Switch to the identity mapped page tables */
  99        movq    %r9, %cr3
 100
 101        /* setup a new stack at the end of the physical control page */
 102        lea     PAGE_SIZE(%r8), %rsp
 103
 104        /* jump to identity mapped page */
 105        addq    $(identity_mapped - relocate_kernel), %r8
 106        pushq   %r8
 107        ret
 108
 109identity_mapped:
 110        /* set return address to 0 if not preserving context */
 111        pushq   $0
 112        /* store the start address on the stack */
 113        pushq   %rdx
 114
 115        /*
 116         * Set cr0 to a known state:
 117         *  - Paging enabled
 118         *  - Alignment check disabled
 119         *  - Write protect disabled
 120         *  - No task switch
 121         *  - Don't do FP software emulation.
 122         *  - Proctected mode enabled
 123         */
 124        movq    %cr0, %rax
 125        andq    $~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax
 126        orl     $(X86_CR0_PG | X86_CR0_PE), %eax
 127        movq    %rax, %cr0
 128
 129        /*
 130         * Set cr4 to a known state:
 131         *  - physical address extension enabled
 132         *  - 5-level paging, if it was enabled before
 133         */
 134        movl    $X86_CR4_PAE, %eax
 135        testq   $X86_CR4_LA57, %r13
 136        jz      1f
 137        orl     $X86_CR4_LA57, %eax
 1381:
 139        movq    %rax, %cr4
 140
 141        jmp 1f
 1421:
 143
 144        /* Flush the TLB (needed?) */
 145        movq    %r9, %cr3
 146
 147        /*
 148         * If SME is active, there could be old encrypted cache line
 149         * entries that will conflict with the now unencrypted memory
 150         * used by kexec. Flush the caches before copying the kernel.
 151         */
 152        testq   %r12, %r12
 153        jz 1f
 154        wbinvd
 1551:
 156
 157        movq    %rcx, %r11
 158        call    swap_pages
 159
 160        /*
 161         * To be certain of avoiding problems with self-modifying code
 162         * I need to execute a serializing instruction here.
 163         * So I flush the TLB by reloading %cr3 here, it's handy,
 164         * and not processor dependent.
 165         */
 166        movq    %cr3, %rax
 167        movq    %rax, %cr3
 168
 169        /*
 170         * set all of the registers to known values
 171         * leave %rsp alone
 172         */
 173
 174        testq   %r11, %r11
 175        jnz 1f
 176        xorl    %eax, %eax
 177        xorl    %ebx, %ebx
 178        xorl    %ecx, %ecx
 179        xorl    %edx, %edx
 180        xorl    %esi, %esi
 181        xorl    %edi, %edi
 182        xorl    %ebp, %ebp
 183        xorl    %r8d, %r8d
 184        xorl    %r9d, %r9d
 185        xorl    %r10d, %r10d
 186        xorl    %r11d, %r11d
 187        xorl    %r12d, %r12d
 188        xorl    %r13d, %r13d
 189        xorl    %r14d, %r14d
 190        xorl    %r15d, %r15d
 191
 192        ret
 193
 1941:
 195        popq    %rdx
 196        leaq    PAGE_SIZE(%r10), %rsp
 197        call    *%rdx
 198
 199        /* get the re-entry point of the peer system */
 200        movq    0(%rsp), %rbp
 201        call    1f
 2021:
 203        popq    %r8
 204        subq    $(1b - relocate_kernel), %r8
 205        movq    CP_PA_SWAP_PAGE(%r8), %r10
 206        movq    CP_PA_BACKUP_PAGES_MAP(%r8), %rdi
 207        movq    CP_PA_TABLE_PAGE(%r8), %rax
 208        movq    %rax, %cr3
 209        lea     PAGE_SIZE(%r8), %rsp
 210        call    swap_pages
 211        movq    $virtual_mapped, %rax
 212        pushq   %rax
 213        ret
 214
 215virtual_mapped:
 216        movq    RSP(%r8), %rsp
 217        movq    CR4(%r8), %rax
 218        movq    %rax, %cr4
 219        movq    CR3(%r8), %rax
 220        movq    CR0(%r8), %r8
 221        movq    %rax, %cr3
 222        movq    %r8, %cr0
 223        movq    %rbp, %rax
 224
 225        popf
 226        popq    %r15
 227        popq    %r14
 228        popq    %r13
 229        popq    %r12
 230        popq    %rbp
 231        popq    %rbx
 232        ret
 233
 234        /* Do the copies */
 235swap_pages:
 236        movq    %rdi, %rcx      /* Put the page_list in %rcx */
 237        xorl    %edi, %edi
 238        xorl    %esi, %esi
 239        jmp     1f
 240
 2410:      /* top, read another word for the indirection page */
 242
 243        movq    (%rbx), %rcx
 244        addq    $8,     %rbx
 2451:
 246        testb   $0x1,   %cl   /* is it a destination page? */
 247        jz      2f
 248        movq    %rcx,   %rdi
 249        andq    $0xfffffffffffff000, %rdi
 250        jmp     0b
 2512:
 252        testb   $0x2,   %cl   /* is it an indirection page? */
 253        jz      2f
 254        movq    %rcx,   %rbx
 255        andq    $0xfffffffffffff000, %rbx
 256        jmp     0b
 2572:
 258        testb   $0x4,   %cl   /* is it the done indicator? */
 259        jz      2f
 260        jmp     3f
 2612:
 262        testb   $0x8,   %cl   /* is it the source indicator? */
 263        jz      0b            /* Ignore it otherwise */
 264        movq    %rcx,   %rsi  /* For ever source page do a copy */
 265        andq    $0xfffffffffffff000, %rsi
 266
 267        movq    %rdi, %rdx
 268        movq    %rsi, %rax
 269
 270        movq    %r10, %rdi
 271        movl    $512, %ecx
 272        rep ; movsq
 273
 274        movq    %rax, %rdi
 275        movq    %rdx, %rsi
 276        movl    $512, %ecx
 277        rep ; movsq
 278
 279        movq    %rdx, %rdi
 280        movq    %r10, %rsi
 281        movl    $512, %ecx
 282        rep ; movsq
 283
 284        lea     PAGE_SIZE(%rax), %rsi
 285        jmp     0b
 2863:
 287        ret
 288
 289        .globl kexec_control_code_size
 290.set kexec_control_code_size, . - relocate_kernel
 291