linux/arch/x86/kernel/head64.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  prepare to run common code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define DISABLE_BRANCH_PROFILING

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>
#include <linux/pgtable.h>

#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/extable.h>
#include <asm/trapnr.h>
#include <asm/sev-es.h>

/*
 * Manage page tables very early on.
 */
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);

#ifdef CONFIG_X86_5LEVEL
unsigned int __pgtable_l5_enabled __ro_after_init;
unsigned int pgdir_shift __ro_after_init = 39;
EXPORT_SYMBOL(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 1;
EXPORT_SYMBOL(ptrs_per_p4d);
#endif

#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
EXPORT_SYMBOL(vmemmap_base);
#endif

/*
 * GDT used on the boot CPU before switching to virtual addresses.
 */
static struct desc_struct startup_gdt[GDT_ENTRIES] = {
        [GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
};

/*
 * Address needs to be set at runtime because it references the startup_gdt
 * while the kernel still uses a direct mapping.
 */
static struct desc_ptr startup_gdt_descr = {
        .size = sizeof(startup_gdt),
        .address = 0,
};

#define __head  __section(".head.text")

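/*
 * Convert the link-time address of a kernel symbol into the physical
 * address it is currently loaded at, so the symbol can be dereferenced
 * while the kernel still runs from the early identity mapping.
 */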
static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
        return ptr - (void *)_text + (void *)physaddr;
}

static unsigned long __head *fixup_long(void *ptr, unsigned long physaddr)
{
        return fixup_pointer(ptr, physaddr);
}

#ifdef CONFIG_X86_5LEVEL
static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr)
{
        return fixup_pointer(ptr, physaddr);
}

static bool __head check_la57_support(unsigned long physaddr)
{
        /*
         * 5-level paging is detected and enabled at the kernel decompression
         * stage. Only check if it has been enabled there.
         */
        if (!(native_read_cr4() & X86_CR4_LA57))
                return false;

        *fixup_int(&__pgtable_l5_enabled, physaddr) = 1;
        *fixup_int(&pgdir_shift, physaddr) = 48;
        *fixup_int(&ptrs_per_p4d, physaddr) = 512;
        *fixup_long(&page_offset_base, physaddr) = __PAGE_OFFSET_BASE_L5;
        *fixup_long(&vmalloc_base, physaddr) = __VMALLOC_BASE_L5;
        *fixup_long(&vmemmap_base, physaddr) = __VMEMMAP_BASE_L5;

        return true;
}
#else
static bool __head check_la57_support(unsigned long physaddr)
{
        return false;
}
#endif

/*
 * Code in __startup_64() can be relocated during execution, but the compiler
 * doesn't have to generate PC-relative relocations when accessing globals from
 * that function. Clang actually does not generate them, which leads to
 * boot-time crashes. To work around this problem, every global pointer must
 * be adjusted using fixup_pointer().
 */
unsigned long __head __startup_64(unsigned long physaddr,
                                  struct boot_params *bp)
{
        unsigned long vaddr, vaddr_end;
        unsigned long load_delta, *p;
        unsigned long pgtable_flags;
        pgdval_t *pgd;
        p4dval_t *p4d;
        pudval_t *pud;
        pmdval_t *pmd, pmd_entry;
        pteval_t *mask_ptr;
        bool la57;
        int i;
        unsigned int *next_pgt_ptr;

        la57 = check_la57_support(physaddr);

        /* Is the address too large? */
        if (physaddr >> MAX_PHYSMEM_BITS)
                for (;;);

        /*
         * Compute the delta between the address I am compiled to run at
         * and the address I am actually running at.
         */
        load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);

        /* Is the address not 2M aligned? */
        if (load_delta & ~PMD_PAGE_MASK)
                for (;;);

        /* Activate Secure Memory Encryption (SME) if supported and enabled */
        sme_enable(bp);

        /* Include the SME encryption mask in the fixup value */
        load_delta += sme_get_me_mask();

        /* Fixup the physical addresses in the page table */

        pgd = fixup_pointer(&early_top_pgt, physaddr);
        p = pgd + pgd_index(__START_KERNEL_map);
        if (la57)
                *p = (unsigned long)level4_kernel_pgt;
        else
                *p = (unsigned long)level3_kernel_pgt;
        *p += _PAGE_TABLE_NOENC - __START_KERNEL_map + load_delta;

        if (la57) {
                p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
                p4d[511] += load_delta;
        }

        pud = fixup_pointer(&level3_kernel_pgt, physaddr);
        pud[510] += load_delta;
        pud[511] += load_delta;

        pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
        for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
                pmd[i] += load_delta;

        /*
         * Set up the identity mapping for the switchover.  These
         * entries should *NOT* have the global bit set!  This also
         * creates a bunch of nonsense entries but that is fine --
         * it avoids problems around wraparound.
         */

        next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
        pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
        pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);

        pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();

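        /*
         * Two adjacent entries are populated at each level so the identity
         * mapping still covers the kernel image if it happens to straddle
         * an entry boundary.
         */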
        if (la57) {
                p4d = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++],
                                    physaddr);

                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
                pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;

                i = physaddr >> P4D_SHIFT;
                p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
                p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
        } else {
                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
                pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
        }

        i = physaddr >> PUD_SHIFT;
        pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
        pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;

        pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
        /* Filter out unsupported __PAGE_KERNEL_* bits: */
        mask_ptr = fixup_pointer(&__supported_pte_mask, physaddr);
        pmd_entry &= *mask_ptr;
        pmd_entry += sme_get_me_mask();
        pmd_entry += physaddr;

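        /* Map the whole kernel image (_text to _end) with 2M pages. */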
        for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
                int idx = i + (physaddr >> PMD_SHIFT);

                pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
        }

        /*
         * Fixup the kernel text+data virtual addresses. Note that we might
         * write invalid pmds; when the kernel is relocated, cleanup_highmap()
         * fixes this up along with the mappings beyond _end.
         *
         * Only the region occupied by the kernel image has so far
         * been checked against the table of usable memory regions
         * provided by the firmware, so invalidate pages outside that
         * region. A page table entry that maps to a reserved area of
         * memory would allow processor speculation into that area,
         * and on some hardware (particularly the UV platform) even
         * speculative access to some reserved areas is caught as an
         * error, causing the BIOS to halt the system.
         */

        pmd = fixup_pointer(level2_kernel_pgt, physaddr);

        /* invalidate pages before the kernel image */
        for (i = 0; i < pmd_index((unsigned long)_text); i++)
                pmd[i] &= ~_PAGE_PRESENT;

        /* fixup pages that are part of the kernel image */
        for (; i <= pmd_index((unsigned long)_end); i++)
                if (pmd[i] & _PAGE_PRESENT)
                        pmd[i] += load_delta;

        /* invalidate pages after the kernel image */
        for (; i < PTRS_PER_PMD; i++)
                pmd[i] &= ~_PAGE_PRESENT;

        /*
         * Fixup phys_base - remove the memory encryption mask to obtain
         * the true physical address.
         */
        *fixup_long(&phys_base, physaddr) += load_delta - sme_get_me_mask();

        /* Encrypt the kernel and related data (if SME is active) */
        sme_encrypt_kernel(bp);

        /*
         * Clear the memory encryption mask from the .bss..decrypted section.
         * The bss section will be memset to zero later in the initialization so
         * there is no need to zero it after changing the memory encryption
         * attribute.
         */
        if (mem_encrypt_active()) {
                vaddr = (unsigned long)__start_bss_decrypted;
                vaddr_end = (unsigned long)__end_bss_decrypted;
                for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
                        i = pmd_index(vaddr);
                        pmd[i] -= sme_get_me_mask();
                }
        }

        /*
         * Return the SME encryption mask (if SME is active) to be used as a
         * modifier for the initial pgdir entry programmed into CR3.
         */
        return sme_get_me_mask();
}

unsigned long __startup_secondary_64(void)
{
        /*
         * Return the SME encryption mask (if SME is active) to be used as a
         * modifier for the initial pgdir entry programmed into CR3.
         */
        return sme_get_me_mask();
}

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
        memset(early_top_pgt, 0, sizeof(pgd_t) * (PTRS_PER_PGD - 1));
        next_early_pgt = 0;
        write_cr3(__sme_pa_nodebug(early_top_pgt));
}

/* Create a new PMD entry */
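/*
 * Walk the early page tables for 'address', allocating missing intermediate
 * tables from the early_dynamic_pgts pool, and install the given PMD entry.
 * If the pool runs out, everything except the kernel mapping is reset and
 * the walk starts over.
 */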
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
        unsigned long physaddr = address - __PAGE_OFFSET;
        pgdval_t pgd, *pgd_p;
        p4dval_t p4d, *p4d_p;
        pudval_t pud, *pud_p;
        pmdval_t *pmd_p;

        /* Invalid address or early pgt is done? */
        if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
                return false;

again:
        pgd_p = &early_top_pgt[pgd_index(address)].pgd;
        pgd = *pgd_p;

        /*
         * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
         * critical -- __PAGE_OFFSET would point us back into the dynamic
         * range and we might end up looping forever...
         */
        if (!pgtable_l5_enabled())
                p4d_p = pgd_p;
        else if (pgd)
                p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
                *pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        p4d_p += p4d_index(address);
        p4d = *p4d_p;

        if (p4d)
                pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
                *p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pud_p += pud_index(address);
        pud = *pud_p;

        if (pud)
                pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
                *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pmd_p[pmd_index(address)] = pmd;

        return true;
}

static bool __init early_make_pgtable(unsigned long address)
{
        unsigned long physaddr = address - __PAGE_OFFSET;
        pmdval_t pmd;

        pmd = (physaddr & PMD_MASK) + early_pmd_flags;

        return __early_make_pgtable(address, pmd);
}

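/*
 * Early exception dispatcher: page faults are satisfied by creating the
 * missing early page-table entry; #VC exceptions (SEV-ES) are handled via
 * the boot GHCB; anything else goes through the generic early exception
 * fixup.
 */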
void __init do_early_exception(struct pt_regs *regs, int trapnr)
{
        if (trapnr == X86_TRAP_PF &&
            early_make_pgtable(native_read_cr2()))
                return;

        if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT) &&
            trapnr == X86_TRAP_VC && handle_vc_boot_ghcb(regs))
                return;

        early_fixup_exception(regs, trapnr);
}

/*
 * Don't add a printk in there. printk relies on the PDA which is not
 * initialized yet.
 */
static void __init clear_bss(void)
{
        memset(__bss_start, 0,
               (unsigned long) __bss_stop - (unsigned long) __bss_start);
}

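/*
 * The boot protocol splits the kernel command-line pointer: the low 32 bits
 * live in the setup header and the upper 32 bits in ext_cmd_line_ptr.
 * Combine them into the full 64-bit physical address.
 */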
static unsigned long get_cmd_line_ptr(void)
{
        unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

        cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

        return cmd_line_ptr;
}

static void __init copy_bootdata(char *real_mode_data)
{
        char *command_line;
        unsigned long cmd_line_ptr;

        /*
         * If SME is active, this will create decrypted mappings of the
         * boot data in advance of the copy operations.
         */
        sme_map_bootdata(real_mode_data);

        memcpy(&boot_params, real_mode_data, sizeof(boot_params));
        sanitize_boot_params(&boot_params);
        cmd_line_ptr = get_cmd_line_ptr();
        if (cmd_line_ptr) {
                command_line = __va(cmd_line_ptr);
                memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
        }

        /*
         * The old boot data is no longer needed and won't be reserved,
         * freeing up that memory for use by the system. If SME is active,
         * we need to remove the mappings that were created so that the
         * memory doesn't remain mapped as decrypted.
         */
        sme_unmap_bootdata(real_mode_data);
}

asmlinkage __visible void __init x86_64_start_kernel(char *real_mode_data)
{
        /*
         * Build-time sanity checks on the kernel image and module
         * area mappings. (These are purely build-time and produce no code.)
         */
        BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
        BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
        BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
        BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
        BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
        BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
        MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
                                (__START_KERNEL & PGDIR_MASK)));
        BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

        cr4_init_shadow();

        /* Kill off the identity-map trampoline */
        reset_early_page_tables();

        clear_bss();

        clear_page(init_top_pgt);

        /*
         * SME support may update early_pmd_flags to include the memory
         * encryption mask, so it needs to be called before anything
         * that may generate a page fault.
         */
        sme_early_init();

        kasan_early_init();

        idt_setup_early_handler();

        copy_bootdata(__va(real_mode_data));

        /*
         * Load microcode early on BSP.
         */
        load_ucode_bsp();

        /* Set the init_top_pgt kernel high mapping */
        init_top_pgt[511] = early_top_pgt[511];

        x86_64_start_reservations(real_mode_data);
}

void __init x86_64_start_reservations(char *real_mode_data)
{
        /* The version is non-zero if the boot data has already been copied */
        if (!boot_params.hdr.version)
                copy_bootdata(__va(real_mode_data));

        x86_early_init_platform_quirks();

        switch (boot_params.hdr.hardware_subarch) {
        case X86_SUBARCH_INTEL_MID:
                x86_intel_mid_early_setup();
                break;
        default:
                break;
        }

        start_kernel();
}

/*
 * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
 * used until the idt_table takes over. On the boot CPU this happens in
 * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
 * this happens in the functions called from head_64.S.
 *
 * The idt_table can't be used that early because all the code modifying it is
 * in idt.c and can be instrumented by tracing or KASAN, which both don't work
 * during early CPU bringup. Also the idt_table has the runtime vectors
 * configured which require certain CPU state to be set up already (like TSS),
 * which also hasn't happened yet in early CPU bringup.
 */
static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;

static struct desc_ptr bringup_idt_descr = {
        .size           = (NUM_EXCEPTION_VECTORS * sizeof(gate_desc)) - 1,
        .address        = 0, /* Set at runtime */
};

static void set_bringup_idt_handler(gate_desc *idt, int n, void *handler)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
        struct idt_data data;
        gate_desc desc;

        init_idt_data(&data, n, handler);
        idt_init_desc(&desc, &data);
        native_write_idt_entry(idt, n, &desc);
#endif
}

/* This runs while still in the direct mapping */
static void startup_64_load_idt(unsigned long physbase)
{
        struct desc_ptr *desc = fixup_pointer(&bringup_idt_descr, physbase);
        gate_desc *idt = fixup_pointer(bringup_idt_table, physbase);

        if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
                void *handler;

                /* VMM Communication Exception */
                handler = fixup_pointer(vc_no_ghcb, physbase);
                set_bringup_idt_handler(idt, X86_TRAP_VC, handler);
        }

        desc->address = (unsigned long)idt;
        native_load_idt(desc);
}

/* This is used when running on kernel addresses */
void early_setup_idt(void)
{
        /* VMM Communication Exception */
        if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
                set_bringup_idt_handler(bringup_idt_table, X86_TRAP_VC, vc_boot_ghcb);

        bringup_idt_descr.address = (unsigned long)bringup_idt_table;
        native_load_idt(&bringup_idt_descr);
}

/*
 * Set up boot CPU state needed before the kernel switches to virtual addresses.
 */
void __head startup_64_setup_env(unsigned long physbase)
{
        /* Load GDT */
        startup_gdt_descr.address = (unsigned long)fixup_pointer(startup_gdt, physbase);
        native_load_gdt(&startup_gdt_descr);

        /* New GDT is live - reload data segment registers */
        asm volatile("movl %%eax, %%ds\n"
                     "movl %%eax, %%ss\n"
                     "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");

        startup_64_load_idt(physbase);
}