linux/arch/x86/kernel/relocate_kernel_32.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * relocate_kernel.S - put the kernel image in place to boot
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/kexec.h>
#include <asm/processor-flags.h>

/*
 * Must be relocatable PIC code callable as a C function
 */

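/*
 * PTR(x) turns an index into the page_list[] array handed to
 * relocate_kernel() into a byte offset: every entry is one 32-bit
 * word, hence index << 2.  The indices themselves (PA_CONTROL_PAGE,
 * VA_CONTROL_PAGE, PA_PGD, PA_SWAP_PAGE) come from <asm/kexec.h>.
 */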
#define PTR(x) (x << 2)

/*
 * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
 * ~ control_page + PAGE_SIZE are used as data storage and stack for
 * jumping back
 */
#define DATA(offset)            (KEXEC_CONTROL_CODE_MAX_SIZE+(offset))

/* Minimal CPU state */
#define ESP                     DATA(0x0)
#define CR0                     DATA(0x4)
#define CR3                     DATA(0x8)
#define CR4                     DATA(0xc)

/* other data */
#define CP_VA_CONTROL_PAGE      DATA(0x10)
#define CP_PA_PGD               DATA(0x14)
#define CP_PA_SWAP_PAGE         DATA(0x18)
#define CP_PA_BACKUP_PAGES_MAP  DATA(0x1c)

        .text
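/*
 * Called (through the copy of this code placed in the control page)
 * roughly as, see machine_kexec() in machine_kexec_32.c:
 *
 *   relocate_kernel(image->head,      /. indirection list ("page_list" below)
 *                   page_list,        /. address array, indexed via PTR()
 *                   image->start,     /. entry point of the new image
 *                   cpu_has_pae,
 *                   preserve_context);
 *
 * It returns, on the preserve_context (kexec jump) path only, with the
 * re-entry point of the peer system in %eax.
 */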
SYM_CODE_START_NOALIGN(relocate_kernel)
        /* Save the CPU context, used for jumping back */

        pushl   %ebx
        pushl   %esi
        pushl   %edi
        pushl   %ebp
        pushf

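        /*
         * The arguments live above the 20 bytes of registers/flags
         * just pushed plus the 4-byte return address, hence the
         * 20+N(%esp) offsets.  %ecx (cpu_has_pae) decides later
         * whether %cr4 is cleared; %esi (preserve_context) selects the
         * kexec-jump path that comes back to this kernel.
         */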
        movl    20+8(%esp), %ebp /* list of pages */
        movl    PTR(VA_CONTROL_PAGE)(%ebp), %edi
        movl    %esp, ESP(%edi)
        movl    %cr0, %eax
        movl    %eax, CR0(%edi)
        movl    %cr3, %eax
        movl    %eax, CR3(%edi)
        movl    %cr4, %eax
        movl    %eax, CR4(%edi)

        /* read the arguments and say goodbye to the stack */
        movl  20+4(%esp), %ebx /* page_list */
        movl  20+8(%esp), %ebp /* list of pages */
        movl  20+12(%esp), %edx /* start address */
        movl  20+16(%esp), %ecx /* cpu_has_pae */
        movl  20+20(%esp), %esi /* preserve_context */

        /* zero out flags, and disable interrupts */
        pushl $0
        popfl

        /* save some information for jumping back */
        movl    PTR(VA_CONTROL_PAGE)(%ebp), %edi
        movl    %edi, CP_VA_CONTROL_PAGE(%edi)
        movl    PTR(PA_PGD)(%ebp), %eax
        movl    %eax, CP_PA_PGD(%edi)
        movl    PTR(PA_SWAP_PAGE)(%ebp), %eax
        movl    %eax, CP_PA_SWAP_PAGE(%edi)
        movl    %ebx, CP_PA_BACKUP_PAGES_MAP(%edi)

        /*
         * get physical address of control page now
         * this is impossible after page table switch
         */
        movl    PTR(PA_CONTROL_PAGE)(%ebp), %edi

        /* switch to new set of page tables */
        movl    PTR(PA_PGD)(%ebp), %eax
        movl    %eax, %cr3

        /* setup a new stack at the end of the physical control page */
        lea     PAGE_SIZE(%edi), %esp

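        /*
         * machine_kexec() already copied this code into the control
         * page, and the new page tables identity-map that page, so the
         * copy of identity_mapped lives at PA_CONTROL_PAGE +
         * (identity_mapped - relocate_kernel).  Push that physical
         * address and "return" to it.
         */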
        /* jump to identity mapped page */
        movl    %edi, %eax
        addl    $(identity_mapped - relocate_kernel), %eax
        pushl   %eax
        ret
SYM_CODE_END(relocate_kernel)

SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
        /* set return address to 0 if not preserving context */
        pushl   $0
        /* store the start address on the stack */
        pushl   %edx
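        /*
         * The stack now holds the start address, then 0.  Without
         * preserve_context, the ret at the end of the register-
         * clearing path below jumps to the start address and the new
         * image sees 0 as its return address; with preserve_context,
         * the start address is popped into %edx and called instead.
         */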

        /*
         * Set cr0 to a known state:
         *  - Paging disabled
         *  - Alignment check disabled
         *  - Write protect disabled
         *  - No task switch
         *  - Don't do FP software emulation.
         *  - Protected mode enabled
         */
        movl    %cr0, %eax
        andl    $~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %eax
        orl     $(X86_CR0_PE), %eax
        movl    %eax, %cr0

        /* clear cr4 if applicable */
        testl   %ecx, %ecx
        jz      1f
        /*
         * Set cr4 to a known state:
         * Setting everything to zero seems safe.
         */
        xorl    %eax, %eax
        movl    %eax, %cr4

        jmp 1f
1:

        /* Flush the TLB (needed?) */
        xorl    %eax, %eax
        movl    %eax, %cr3

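        /*
         * Put the image in place: swap_pages(indirection_list,
         * swap_page) walks image->head and moves every source page to
         * its destination, bouncing through the swap page so the old
         * contents can be restored on the preserve_context (kexec
         * jump) path.
         */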
        movl    CP_PA_SWAP_PAGE(%edi), %eax
        pushl   %eax
        pushl   %ebx
        call    swap_pages
        addl    $8, %esp

        /*
         * To be certain of avoiding problems with self-modifying code
         * I need to execute a serializing instruction here.
         * So I flush the TLB, it's handy, and not processor dependent.
         */
        xorl    %eax, %eax
        movl    %eax, %cr3

        /*
         * set all of the registers to known values
         * leave %esp alone
         */

        testl   %esi, %esi
        jnz 1f
        xorl    %edi, %edi
        xorl    %eax, %eax
        xorl    %ebx, %ebx
        xorl    %ecx, %ecx
        xorl    %edx, %edx
        xorl    %esi, %esi
        xorl    %ebp, %ebp
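        /*
         * Not preserving context: the start address pushed on entry to
         * identity_mapped is at the top of the stack, so this ret
         * jumps into the new image with all registers cleared.
         */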
        ret
1:
        popl    %edx
        movl    CP_PA_SWAP_PAGE(%edi), %esp
        addl    $PAGE_SIZE, %esp
2:
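        /*
         * preserve_context (kexec jump): run on a stack at the top of
         * the swap page and call the image's entry point.  When it
         * returns, the word it leaves at the top of the stack is taken
         * as the peer system's re-entry point.
         */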
        call    *%edx

        /* get the re-entry point of the peer system */
        movl    0(%esp), %ebp
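        /*
         * Find ourselves again: the call/pop pair below yields the
         * runtime address of local label 1, and subtracting that
         * label's link-time offset from relocate_kernel gives the
         * physical base of the control page copy we are running from.
         */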
        call    1f
1:
        popl    %ebx
        subl    $(1b - relocate_kernel), %ebx
        movl    CP_VA_CONTROL_PAGE(%ebx), %edi
        lea     PAGE_SIZE(%ebx), %esp
        movl    CP_PA_SWAP_PAGE(%ebx), %eax
        movl    CP_PA_BACKUP_PAGES_MAP(%ebx), %edx
        pushl   %eax
        pushl   %edx
        call    swap_pages
        addl    $8, %esp
        movl    CP_PA_PGD(%ebx), %eax
        movl    %eax, %cr3
        movl    %cr0, %eax
        orl     $X86_CR0_PG, %eax
        movl    %eax, %cr0
        lea     PAGE_SIZE(%edi), %esp
        movl    %edi, %eax
        addl    $(virtual_mapped - relocate_kernel), %eax
        pushl   %eax
        ret
SYM_CODE_END(identity_mapped)

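/*
 * Back from the peer: swap_pages above put this kernel's pages back,
 * %cr3 points at the kexec page tables again and paging is re-enabled,
 * so we can run at the control page's virtual address.  Restore the
 * CPU state saved in relocate_kernel() and return to the caller with
 * the peer's re-entry point (%ebp) as the return value.
 */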
SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
        movl    CR4(%edi), %eax
        movl    %eax, %cr4
        movl    CR3(%edi), %eax
        movl    %eax, %cr3
        movl    CR0(%edi), %eax
        movl    %eax, %cr0
        movl    ESP(%edi), %esp
        movl    %ebp, %eax

        popf
        popl    %ebp
        popl    %edi
        popl    %esi
        popl    %ebx
        ret
SYM_CODE_END(virtual_mapped)

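/*
 * swap_pages(indirection_list, swap_page)
 *
 * Walk the kexec indirection list.  The low bits of each entry encode
 * its type (see <linux/kexec.h>): IND_DESTINATION (0x1) sets the
 * current destination page, IND_INDIRECTION (0x2) chains to the next
 * indirection page, IND_DONE (0x4) ends the list, and IND_SOURCE (0x8)
 * names a source page whose contents are exchanged with the current
 * destination via the swap page.
 */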
        /* Do the copies */
SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
        movl    8(%esp), %edx
        movl    4(%esp), %ecx
        pushl   %ebp
        pushl   %ebx
        pushl   %edi
        pushl   %esi
        movl    %ecx, %ebx
        jmp     1f

0:      /* top, read another word from the indirection page */
        movl    (%ebx), %ecx
        addl    $4, %ebx
1:
        testb   $0x1, %cl     /* is it a destination page */
        jz      2f
        movl    %ecx,   %edi
        andl    $0xfffff000, %edi
        jmp     0b
2:
        testb   $0x2, %cl    /* is it an indirection page */
        jz      2f
        movl    %ecx,   %ebx
        andl    $0xfffff000, %ebx
        jmp     0b
2:
        testb   $0x4, %cl    /* is it the done indicator */
        jz      2f
        jmp     3f
2:
        testb   $0x8, %cl    /* is it the source indicator */
        jz      0b           /* Ignore it otherwise */
        movl    %ecx,   %esi /* For every source page do a copy */
        andl    $0xfffff000, %esi

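        /*
         * Exchange the source and destination pages through the swap
         * page, 1024 dwords (one page) per copy:
         * source -> swap, destination -> source, swap -> destination.
         */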
        movl    %edi, %eax
        movl    %esi, %ebp

        movl    %edx, %edi
        movl    $1024, %ecx
        rep ; movsl

        movl    %ebp, %edi
        movl    %eax, %esi
        movl    $1024, %ecx
        rep ; movsl

        movl    %eax, %edi
        movl    %edx, %esi
        movl    $1024, %ecx
        rep ; movsl

        lea     PAGE_SIZE(%ebp), %esi
        jmp     0b
3:
        popl    %esi
        popl    %edi
        popl    %ebx
        popl    %ebp
        ret
SYM_CODE_END(swap_pages)

        .globl kexec_control_code_size
.set kexec_control_code_size, . - relocate_kernel