linux/arch/x86/boot/compressed/head_64.S
/*
 *  linux/boot/head.S
 *
 *  Copyright (C) 1991, 1992, 1993  Linus Torvalds
 */

/*
 *  head.S contains the 32-bit startup code.
 *
 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
 * the page directory will exist. The startup code will be overwritten by
 * the page directory. [According to comments etc elsewhere on a compressed
 * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
 *
 * Page 0 is deliberately kept safe, since System Management Mode code in
 * laptops may need to access the BIOS data stored there.  This is also
 * useful for future device drivers that access the BIOS via VM86 mode.
 */

/*
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
        .code32
        .text

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/pgtable_types.h>
#include <asm/page_types.h>
#include <asm/boot.h>
#include <asm/msr.h>
#include <asm/processor-flags.h>
#include <asm/asm-offsets.h>

        __HEAD
        .code32
ENTRY(startup_32)
        cld
        /*
         * Test KEEP_SEGMENTS flag to see if the bootloader is asking
         * us to not reload segments
         */
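        /* Bit 6 of boot_params.hdr.loadflags is KEEP_SEGMENTS (see Documentation/x86/boot.txt) */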
        testb $(1<<6), BP_loadflags(%esi)
        jnz 1f

        cli
        movl    $(__KERNEL_DS), %eax
        movl    %eax, %ds
        movl    %eax, %es
        movl    %eax, %ss
1:

/*
 * Calculate the delta between where we were compiled to run
 * at and where we were actually loaded at.  This can only be done
 * with a short local call on x86.  Nothing else will tell us what
 * address we are running at.  The reserved chunk of the real-mode
 * data at 0x1e4 (defined as a scratch field) is used as the stack
 * for this calculation.  Only 4 bytes are needed.
 */
        leal    (BP_scratch+4)(%esi), %esp
        call    1f
1:      popl    %ebp
        subl    $1b, %ebp
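        /*
         * The call pushed the runtime address of label 1 above; popping
         * it and subtracting the link-time address $1b leaves the load
         * delta (runtime base minus compile-time base) in %ebp.
         */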

/* Set up a stack and make sure the CPU supports long mode. */
        movl    $boot_stack_end, %eax
        addl    %ebp, %eax
        movl    %eax, %esp

        call    verify_cpu
        testl   %eax, %eax
        jnz     no_longmode

/*
 * Compute the delta between where we were compiled to run at
 * and where the code will actually run at.
 *
 * %ebp contains the address we are loaded at by the boot loader and %ebx
 * contains the address where we should move the kernel image temporarily
 * for safe in-place decompression.
 */

#ifdef CONFIG_RELOCATABLE
        movl    %ebp, %ebx
        movl    BP_kernel_alignment(%esi), %eax
        decl    %eax
        addl    %eax, %ebx
        notl    %eax
        andl    %eax, %ebx
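        /* Round up to kernel_alignment: %ebx = (%ebp + align - 1) & ~(align - 1) */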
#else
        movl    $LOAD_PHYSICAL_ADDR, %ebx
#endif

        /* Target address to relocate to for decompression */
        addl    $z_extract_offset, %ebx

/*
 * Prepare for entering 64 bit mode
 */

        /* Load new GDT with the 64bit segments using 32bit descriptor */
        leal    gdt(%ebp), %eax
        movl    %eax, gdt+2(%ebp)
        lgdt    gdt(%ebp)
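        /*
         * gdt+2 is the base field of the GDT pseudo-descriptor stored at
         * gdt below; it must be patched with the runtime address because
         * the linked-in value is only correct if we were not relocated.
         */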

        /* Enable PAE mode */
        xorl    %eax, %eax
        orl     $(X86_CR4_PAE), %eax
        movl    %eax, %cr4

/*
 * Build early 4G boot pagetable
 */
        /* Initialize Page tables to 0 */
        leal    pgtable(%ebx), %edi
        xorl    %eax, %eax
        movl    $((4096*6)/4), %ecx
        rep     stosl
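        /*
         * The six zeroed pages hold one level-4 table (PML4), one
         * level-3 table (PDPT) with four entries, and four level-2
         * tables mapping 4G with 2M pages.
         */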

        /* Build Level 4 */
        leal    pgtable + 0(%ebx), %edi
        leal    0x1007(%edi), %eax
        movl    %eax, 0(%edi)
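        /* 0x1007 = distance to the next table (0x1000) + _PAGE_PRESENT|_PAGE_RW|_PAGE_USER */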

        /* Build Level 3 */
        leal    pgtable + 0x1000(%ebx), %edi
        leal    0x1007(%edi), %eax
        movl    $4, %ecx
1:      movl    %eax, 0x00(%edi)
        addl    $0x00001000, %eax
        addl    $8, %edi
        decl    %ecx
        jnz     1b

        /* Build Level 2 */
        leal    pgtable + 0x2000(%ebx), %edi
        movl    $0x00000183, %eax
        movl    $2048, %ecx
1:      movl    %eax, 0(%edi)
        addl    $0x00200000, %eax
        addl    $8, %edi
        decl    %ecx
        jnz     1b
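        /*
         * 2048 entries x 2M each = 4G identity mapped.  0x183 =
         * _PAGE_PRESENT | _PAGE_RW | _PAGE_PSE (2M page) | _PAGE_GLOBAL.
         */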

        /* Enable the boot page tables */
        leal    pgtable(%ebx), %eax
        movl    %eax, %cr3

        /* Enable Long mode in EFER (Extended Feature Enable Register) */
        movl    $MSR_EFER, %ecx
        rdmsr
        btsl    $_EFER_LME, %eax
        wrmsr

        /*
         * Setup for the jump to 64bit mode
         *
         * When the jump is performed we will be in long mode but
         * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
         * (and in turn EFER.LMA = 1).  To jump into 64bit mode we use
         * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
         * We place all of the values on our mini stack so lret can
         * be used to perform that far jump.
         */
        pushl   $__KERNEL_CS
        leal    startup_64(%ebp), %eax
        pushl   %eax

        /* Enter paged protected Mode, activating Long Mode */
        movl    $(X86_CR0_PG | X86_CR0_PE), %eax /* Enable Paging and Protected mode */
        movl    %eax, %cr0

        /* Jump from 32bit compatibility mode into 64bit mode. */
        lret
ENDPROC(startup_32)

no_longmode:
        /* This isn't an x86-64 CPU so hang */
1:
        hlt
        jmp     1b

#include "../../kernel/verify_cpu_64.S"

        /*
         * Be careful here: startup_64 needs to be at a predictable
         * address so I can export it in an ELF header.  Bootloaders
         * should look at the ELF header to find this address, as
         * it may change in the future.
         */
        .code64
        .org 0x200
ENTRY(startup_64)
        /*
         * We come here either from startup_32 or directly from a
         * 64bit bootloader.  If we come here from a bootloader we depend on
         * an identity mapped page table being provided that maps our
         * entire text+data+bss and hopefully all of memory.
         */

        /* Setup data segments. */
        xorl    %eax, %eax
        movl    %eax, %ds
        movl    %eax, %es
        movl    %eax, %ss
        movl    %eax, %fs
        movl    %eax, %gs
        lldt    %ax
        movl    $0x20, %eax
        ltr     %ax
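        /*
         * In long mode segment bases and limits are mostly unused, so
         * null data selectors are sufficient.  The LDT is cleared and TR
         * is loaded with selector 0x20, the 16-byte TSS descriptor in
         * the GDT below.
         */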

        /*
         * Compute the decompressed kernel start address.  It is where
         * we were loaded at, aligned to a 2M boundary.  %rbp contains
         * the decompressed kernel start address.
         *
         * If it is a relocatable kernel then decompress and run the kernel
         * from the load address aligned to 2MB, otherwise decompress and
         * run the kernel from LOAD_PHYSICAL_ADDR.
         *
         * We cannot rely on the calculation done in 32-bit mode, since we
         * may have been invoked via the 64-bit entry point.
         */

        /* Start with the delta to where the kernel will run at. */
#ifdef CONFIG_RELOCATABLE
        leaq    startup_32(%rip) /* - $startup_32 */, %rbp
        movl    BP_kernel_alignment(%rsi), %eax
        decl    %eax
        addq    %rax, %rbp
        notq    %rax
        andq    %rax, %rbp
#else
        movq    $LOAD_PHYSICAL_ADDR, %rbp
#endif
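        /*
         * In the relocatable case the RIP-relative leaq above yields our
         * runtime load address with no call/pop trick, and it is then
         * rounded up to kernel_alignment just as in startup_32.
         */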

        /* Target address to relocate to for decompression */
        leaq    z_extract_offset(%rbp), %rbx
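        /*
         * z_extract_offset is generated at build time; it is large enough
         * that decompressing in place at %rbx never overwrites compressed
         * bytes that have not been consumed yet.
         */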

        /* Set up the stack */
        leaq    boot_stack_end(%rbx), %rsp

        /* Zero EFLAGS */
        pushq   $0
        popfq

/*
 * Copy the compressed kernel to the end of our buffer
 * where decompression in place becomes safe.
 */
        pushq   %rsi
        leaq    (_bss-8)(%rip), %rsi
        leaq    (_bss-8)(%rbx), %rdi
        movq    $_bss /* - $startup_32 */, %rcx
        shrq    $3, %rcx
        std
        rep     movsq
        cld
        popq    %rsi
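        /*
         * The copy runs backwards (std before rep movsq), eight bytes at
         * a time starting at _bss-8, so overlapping source and
         * destination buffers are handled safely.
         */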

/*
 * Jump to the relocated address.
 */
        leaq    relocated(%rbx), %rax
        jmp     *%rax

        .text
relocated:

/*
 * Clear BSS (stack is currently empty)
 */
        xorl    %eax, %eax
        leaq    _bss(%rip), %rdi
        leaq    _ebss(%rip), %rcx
        subq    %rdi, %rcx
        shrq    $3, %rcx
        rep     stosq

/*
 * Do the decompression, and jump to the new kernel.
 */
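        /*
         * decompress_kernel(rmode, heap, input_data, input_len, output)
         * takes its arguments in %rdi, %rsi, %rdx, %rcx and %r8, per the
         * System V AMD64 calling convention.
         */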
        pushq   %rsi                    /* Save the real mode argument */
        movq    %rsi, %rdi              /* real mode address */
        leaq    boot_heap(%rip), %rsi   /* malloc area for decompression */
        leaq    input_data(%rip), %rdx  /* input_data */
        movl    $z_input_len, %ecx      /* input_len */
        movq    %rbp, %r8               /* output target address */
        call    decompress_kernel
        popq    %rsi

/*
 * Jump to the decompressed kernel.
 */
        jmp     *%rbp

        .data
gdt:
        .word   gdt_end - gdt
        .long   gdt
        .word   0
        .quad   0x0000000000000000      /* NULL descriptor */
        .quad   0x00af9a000000ffff      /* __KERNEL_CS */
        .quad   0x00cf92000000ffff      /* __KERNEL_DS */
        .quad   0x0080890000000000      /* TS descriptor */
        .quad   0x0000000000000000      /* TS continued */
gdt_end:
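        /*
         * The first 8 bytes (limit word, base dword, pad word) are the
         * pseudo-descriptor loaded by lgdt; its base field is what
         * startup_32 patches at gdt+2.  __KERNEL_CS sets the L bit
         * (flags nibble 0xa), selecting a 64-bit code segment, and the
         * TSS descriptor takes two quads because long-mode system
         * descriptors are 16 bytes.
         */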

/*
 * Stack and heap for decompression
 */
        .bss
        .balign 4
boot_heap:
        .fill BOOT_HEAP_SIZE, 1, 0
boot_stack:
        .fill BOOT_STACK_SIZE, 1, 0
boot_stack_end:

/*
 * Space for page tables (not in .bss so not zeroed)
 */
        .section ".pgtable","a",@nobits
        .balign 4096
pgtable:
        .fill 6*4096, 1, 0