linux/arch/x86/kernel/relocate_kernel_32.S
/*
 * relocate_kernel.S - put the kernel image in place to boot
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/kexec.h>
#include <asm/processor-flags.h>

/*
 * Must be relocatable PIC code callable as a C function
 */

#define PTR(x) (x << 2)

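/*
 * PTR() turns an index into the page_list array handed in by the C
 * caller into a byte offset (each entry is a 32-bit word).  The
 * VA_CONTROL_PAGE, PA_CONTROL_PAGE, PA_PGD and PA_SWAP_PAGE indices
 * used with it below are assumed to come from <asm/kexec.h>.
 */
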
/*
 * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
 * ~ control_page + PAGE_SIZE are used as data storage and stack for
 * jumping back
 */
#define DATA(offset)            (KEXEC_CONTROL_CODE_MAX_SIZE+(offset))

/* Minimal CPU state */
#define ESP                     DATA(0x0)
#define CR0                     DATA(0x4)
#define CR3                     DATA(0x8)
#define CR4                     DATA(0xc)

/* other data */
#define CP_VA_CONTROL_PAGE      DATA(0x10)
#define CP_PA_PGD               DATA(0x14)
#define CP_PA_SWAP_PAGE         DATA(0x18)
#define CP_PA_BACKUP_PAGES_MAP  DATA(0x1c)

        .text
        .globl relocate_kernel
relocate_kernel:
        /* Save the CPU context, used for jumping back */

        pushl   %ebx
        pushl   %esi
        pushl   %edi
        pushl   %ebp
        pushf

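        /*
         * The five pushes above occupy 20 bytes of stack, which is why
         * the C arguments are read at 20+4(%esp), 20+8(%esp), ... below.
         * As a rough sketch (the exact C-side declaration is an
         * assumption, not taken from this file), the call is:
         *
         *   relocate_kernel(page_list, list_of_pages, start_address,
         *                   cpu_has_pae, preserve_context);
         */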
        movl    20+8(%esp), %ebp /* list of pages */
        movl    PTR(VA_CONTROL_PAGE)(%ebp), %edi
        movl    %esp, ESP(%edi)
        movl    %cr0, %eax
        movl    %eax, CR0(%edi)
        movl    %cr3, %eax
        movl    %eax, CR3(%edi)
        movl    %cr4, %eax
        movl    %eax, CR4(%edi)

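        /*
         * The stack pointer and control registers saved above (at the
         * DATA() offsets in the virtual control page) are only read
         * back by virtual_mapped, on the preserve_context return path.
         */
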
        /* read the arguments and say goodbye to the stack */
        movl  20+4(%esp), %ebx /* page_list */
        movl  20+8(%esp), %ebp /* list of pages */
        movl  20+12(%esp), %edx /* start address */
        movl  20+16(%esp), %ecx /* cpu_has_pae */
        movl  20+20(%esp), %esi /* preserve_context */

        /* zero out flags, and disable interrupts */
        pushl $0
        popfl

        /* save some information for jumping back */
        movl    PTR(VA_CONTROL_PAGE)(%ebp), %edi
        movl    %edi, CP_VA_CONTROL_PAGE(%edi)
        movl    PTR(PA_PGD)(%ebp), %eax
        movl    %eax, CP_PA_PGD(%edi)
        movl    PTR(PA_SWAP_PAGE)(%ebp), %eax
        movl    %eax, CP_PA_SWAP_PAGE(%edi)
        movl    %ebx, CP_PA_BACKUP_PAGES_MAP(%edi)

        /*
         * get the physical address of the control page now;
         * this is impossible after the page table switch
         */
        movl    PTR(PA_CONTROL_PAGE)(%ebp), %edi

        /* switch to new set of page tables */
        movl    PTR(PA_PGD)(%ebp), %eax
        movl    %eax, %cr3

        /* setup a new stack at the end of the physical control page */
        lea     PAGE_SIZE(%edi), %esp

        /* jump to identity mapped page */
        movl    %edi, %eax
        addl    $(identity_mapped - relocate_kernel), %eax
        pushl   %eax
        ret

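/*
 * The push/ret above is an absolute jump: %eax holds the physical
 * address of identity_mapped within the control page, so execution
 * resumes here through the identity mapping rather than through the
 * kernel virtual mapping of this (position independent) code.
 */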
identity_mapped:
        /* set return address to 0 if not preserving context */
        pushl   $0
        /* store the start address on the stack */
        pushl   %edx

        /*
         * Set cr0 to a known state:
         *  - Paging disabled
         *  - Alignment check disabled
         *  - Write protect disabled
         *  - No task switch
         *  - Don't do FP software emulation.
         *  - Protected mode enabled
         */
        movl    %cr0, %eax
        andl    $~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %eax
        orl     $(X86_CR0_PE), %eax
        movl    %eax, %cr0

        /* clear cr4 if applicable */
        testl   %ecx, %ecx
        jz      1f
        /*
         * Set cr4 to a known state:
         * Setting everything to zero seems safe.
         */
        xorl    %eax, %eax
        movl    %eax, %cr4

        jmp 1f
1:

        /* Flush the TLB (needed?) */
        xorl    %eax, %eax
        movl    %eax, %cr3

        movl    CP_PA_SWAP_PAGE(%edi), %eax
        pushl   %eax
        pushl   %ebx
        call    swap_pages
        addl    $8, %esp

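        /*
         * The destination pages now hold the new image.  Because
         * swap_pages exchanges pages rather than overwriting them, the
         * old contents are parked in the source pages (via the swap
         * page), which is what allows the preserve_context path to
         * return to the original kernel later.
         */
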
        /*
         * To be certain of avoiding problems with self-modifying code
         * I need to execute a serializing instruction here.
         * So I flush the TLB, it's handy, and not processor dependent.
         */
        xorl    %eax, %eax
        movl    %eax, %cr3

        /*
         * set all of the registers to known values
         * leave %esp alone
         *
         * If preserve_context is zero, clear the general purpose
         * registers and "return": the ret pops the start address that
         * identity_mapped pushed, handing control to the new image
         * (the $0 pushed beneath it is a dummy return address).
         */

        testl   %esi, %esi
        jnz 1f
        xorl    %edi, %edi
        xorl    %eax, %eax
        xorl    %ebx, %ebx
        xorl    %ecx, %ecx
        xorl    %edx, %edx
        xorl    %esi, %esi
        xorl    %ebp, %ebp
        ret
1:
        popl    %edx
        /* use the swap page as a temporary stack for the call below */
        movl    CP_PA_SWAP_PAGE(%edi), %esp
        addl    $PAGE_SIZE, %esp
2:
        call    *%edx

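        /*
         * preserve_context: the image we called has returned to us and
         * 0(%esp) holds its re-entry point.  The code below recovers
         * the physical address of relocate_kernel with a call/pop (the
         * code is PIC), swaps the pages back to restore the original
         * kernel, reloads the kexec page tables, re-enables paging and
         * then uses the same push/ret trick to continue at
         * virtual_mapped through the control page's virtual mapping.
         */
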
        /* get the re-entry point of the peer system */
        movl    0(%esp), %ebp
        call    1f
1:
        popl    %ebx
        subl    $(1b - relocate_kernel), %ebx
        movl    CP_VA_CONTROL_PAGE(%ebx), %edi
        lea     PAGE_SIZE(%ebx), %esp
        movl    CP_PA_SWAP_PAGE(%ebx), %eax
        movl    CP_PA_BACKUP_PAGES_MAP(%ebx), %edx
        pushl   %eax
        pushl   %edx
        call    swap_pages
        addl    $8, %esp
        movl    CP_PA_PGD(%ebx), %eax
        movl    %eax, %cr3
        movl    %cr0, %eax
        orl     $X86_CR0_PG, %eax
        movl    %eax, %cr0
        lea     PAGE_SIZE(%edi), %esp
        movl    %edi, %eax
        addl    $(virtual_mapped - relocate_kernel), %eax
        pushl   %eax
        ret

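/*
 * Back in the kernel virtual mapping of the control page with paging
 * enabled: restore the control registers and stack saved at entry,
 * move the peer's re-entry point (still in %ebp) into %eax as the
 * return value, and return to the C caller of relocate_kernel.
 */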
virtual_mapped:
        movl    CR4(%edi), %eax
        movl    %eax, %cr4
        movl    CR3(%edi), %eax
        movl    %eax, %cr3
        movl    CR0(%edi), %eax
        movl    %eax, %cr0
        movl    ESP(%edi), %esp
        movl    %ebp, %eax

        popf
        popl    %ebp
        popl    %edi
        popl    %esi
        popl    %ebx
        ret

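/*
 * swap_pages(page_list, swap_page) - exchange the source and
 * destination pages described by the kexec indirection list.
 *
 * Each list entry is a page-aligned physical address with flag bits in
 * the low bits; the 0x1/0x2/0x4/0x8 tests below are assumed to match
 * IND_DESTINATION, IND_INDIRECTION, IND_DONE and IND_SOURCE from
 * <linux/kexec.h>.  Each source page is exchanged with the current
 * destination page through the swap page, 1024 dwords (one 4 KiB
 * page) at a time.
 */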
        /* Do the copies */
swap_pages:
        movl    8(%esp), %edx /* swap page */
        movl    4(%esp), %ecx /* indirection page */
        pushl   %ebp
        pushl   %ebx
        pushl   %edi
        pushl   %esi
        movl    %ecx, %ebx
        jmp     1f

0:      /* top, read another word from the indirection page */
        movl    (%ebx), %ecx
        addl    $4, %ebx
1:
        testb   $0x1, %cl     /* is it a destination page */
        jz      2f
        movl    %ecx,   %edi
        andl    $0xfffff000, %edi
        jmp     0b
2:
        testb   $0x2, %cl    /* is it an indirection page */
        jz      2f
        movl    %ecx,   %ebx
        andl    $0xfffff000, %ebx
        jmp     0b
2:
        testb   $0x4, %cl    /* is it the done indicator */
        jz      2f
        jmp     3f
2:
        testb   $0x8, %cl    /* is it the source indicator */
        jz      0b           /* Ignore it otherwise */
        movl    %ecx,   %esi /* For every source page do a copy */
        andl    $0xfffff000, %esi

        movl    %edi, %eax   /* %eax = destination page */
        movl    %esi, %ebp   /* %ebp = source page */

        movl    %edx, %edi   /* copy the source page to the swap page */
        movl    $1024, %ecx
        rep ; movsl

        movl    %ebp, %edi   /* copy the destination page to the source page */
        movl    %eax, %esi
        movl    $1024, %ecx
        rep ; movsl

        movl    %eax, %edi   /* copy the swap page to the destination page */
        movl    %edx, %esi
        movl    $1024, %ecx
        rep ; movsl

        lea     PAGE_SIZE(%ebp), %esi
        jmp     0b
3:
        popl    %esi
        popl    %edi
        popl    %ebx
        popl    %ebp
        ret

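/*
 * kexec_control_code_size covers everything from relocate_kernel to
 * this point; it is presumably checked on the C side so that the code
 * stays below the DATA() area, i.e. within KEXEC_CONTROL_CODE_MAX_SIZE
 * of the control page.
 */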
        .globl kexec_control_code_size
.set kexec_control_code_size, . - relocate_kernel