linux/arch/x86/kernel/cpu/common.c
   1#include <linux/bootmem.h>
   2#include <linux/linkage.h>
   3#include <linux/bitops.h>
   4#include <linux/kernel.h>
   5#include <linux/module.h>
   6#include <linux/percpu.h>
   7#include <linux/string.h>
   8#include <linux/ctype.h>
   9#include <linux/delay.h>
  10#include <linux/sched.h>
  11#include <linux/init.h>
  12#include <linux/kprobes.h>
  13#include <linux/kgdb.h>
  14#include <linux/smp.h>
  15#include <linux/io.h>
  16#include <linux/syscore_ops.h>
  17
  18#include <asm/stackprotector.h>
  19#include <asm/perf_event.h>
  20#include <asm/mmu_context.h>
  21#include <asm/archrandom.h>
  22#include <asm/hypervisor.h>
  23#include <asm/processor.h>
  24#include <asm/tlbflush.h>
  25#include <asm/debugreg.h>
  26#include <asm/sections.h>
  27#include <asm/vsyscall.h>
  28#include <linux/topology.h>
  29#include <linux/cpumask.h>
  30#include <asm/pgtable.h>
  31#include <linux/atomic.h>
  32#include <asm/proto.h>
  33#include <asm/setup.h>
  34#include <asm/apic.h>
  35#include <asm/desc.h>
  36#include <asm/fpu/internal.h>
  37#include <asm/mtrr.h>
  38#include <linux/numa.h>
  39#include <asm/asm.h>
  40#include <asm/bugs.h>
  41#include <asm/cpu.h>
  42#include <asm/mce.h>
  43#include <asm/msr.h>
  44#include <asm/pat.h>
  45#include <asm/microcode.h>
  46#include <asm/microcode_intel.h>
  47
  48#ifdef CONFIG_X86_LOCAL_APIC
  49#include <asm/uv/uv.h>
  50#endif
  51
  52#include "cpu.h"
  53
  54/* all of these masks are initialized in setup_cpu_local_masks() */
  55cpumask_var_t cpu_initialized_mask;
  56cpumask_var_t cpu_callout_mask;
  57cpumask_var_t cpu_callin_mask;
  58
  59/* representing cpus for which sibling maps can be computed */
  60cpumask_var_t cpu_sibling_setup_mask;
  61
  62/* correctly size the local cpu masks */
  63void __init setup_cpu_local_masks(void)
  64{
  65        alloc_bootmem_cpumask_var(&cpu_initialized_mask);
  66        alloc_bootmem_cpumask_var(&cpu_callin_mask);
  67        alloc_bootmem_cpumask_var(&cpu_callout_mask);
  68        alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
  69}
  70
  71static void default_init(struct cpuinfo_x86 *c)
  72{
  73#ifdef CONFIG_X86_64
  74        cpu_detect_cache_sizes(c);
  75#else
  76        /* Not much we can do here... */
   77        /* Check if it at least has CPUID */
  78        if (c->cpuid_level == -1) {
  79                /* No cpuid. It must be an ancient CPU */
  80                if (c->x86 == 4)
  81                        strcpy(c->x86_model_id, "486");
  82                else if (c->x86 == 3)
  83                        strcpy(c->x86_model_id, "386");
  84        }
  85#endif
  86}
  87
  88static const struct cpu_dev default_cpu = {
  89        .c_init         = default_init,
  90        .c_vendor       = "Unknown",
  91        .c_x86_vendor   = X86_VENDOR_UNKNOWN,
  92};
  93
  94static const struct cpu_dev *this_cpu = &default_cpu;
  95
  96DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
  97#ifdef CONFIG_X86_64
  98        /*
  99         * We need valid kernel segments for data and code in long mode too
 100         * IRET will check the segment types  kkeil 2000/10/28
 101         * Also sysret mandates a special GDT layout
 102         *
 103         * TLS descriptors are currently at a different place compared to i386.
 104         * Hopefully nobody expects them at a fixed place (Wine?)
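              *
              * In GDT_ENTRY_INIT(flags, base, limit), the low byte of flags is
              * the access byte (present bit, DPL, type) and the top nibble holds
              * the granularity/size bits: e.g. 0xc0 = 4K granularity + 32-bit,
              * 0xa0 = 4K granularity + long mode.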
 105         */
 106        [GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
 107        [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
 108        [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
 109        [GDT_ENTRY_DEFAULT_USER32_CS]   = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
 110        [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
 111        [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
 112#else
 113        [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
 114        [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 115        [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
 116        [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
 117        /*
 118         * Segments used for calling PnP BIOS have byte granularity.
  119         * The code and data segments have fixed 64k limits,
 120         * the transfer segment sizes are set at run time.
 121         */
 122        /* 32-bit code */
 123        [GDT_ENTRY_PNPBIOS_CS32]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
 124        /* 16-bit code */
 125        [GDT_ENTRY_PNPBIOS_CS16]        = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
 126        /* 16-bit data */
 127        [GDT_ENTRY_PNPBIOS_DS]          = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
 128        /* 16-bit data */
 129        [GDT_ENTRY_PNPBIOS_TS1]         = GDT_ENTRY_INIT(0x0092, 0, 0),
 130        /* 16-bit data */
 131        [GDT_ENTRY_PNPBIOS_TS2]         = GDT_ENTRY_INIT(0x0092, 0, 0),
 132        /*
 133         * The APM segments have byte granularity and their bases
 134         * are set at run time.  All have 64k limits.
 135         */
 136        /* 32-bit code */
 137        [GDT_ENTRY_APMBIOS_BASE]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
 138        /* 16-bit code */
 139        [GDT_ENTRY_APMBIOS_BASE+1]      = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
 140        /* data */
 141        [GDT_ENTRY_APMBIOS_BASE+2]      = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
 142
 143        [GDT_ENTRY_ESPFIX_SS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 144        [GDT_ENTRY_PERCPU]              = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 145        GDT_STACK_CANARY_INIT
 146#endif
 147} };
 148EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 149
 150static int __init x86_mpx_setup(char *s)
 151{
 152        /* require an exact match without trailing characters */
 153        if (strlen(s))
 154                return 0;
 155
 156        /* do not emit a message if the feature is not present */
 157        if (!boot_cpu_has(X86_FEATURE_MPX))
 158                return 1;
 159
 160        setup_clear_cpu_cap(X86_FEATURE_MPX);
 161        pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
 162        return 1;
 163}
 164__setup("nompx", x86_mpx_setup);
 165
 166static int __init x86_noinvpcid_setup(char *s)
 167{
 168        /* noinvpcid doesn't accept parameters */
 169        if (s)
 170                return -EINVAL;
 171
 172        /* do not emit a message if the feature is not present */
 173        if (!boot_cpu_has(X86_FEATURE_INVPCID))
 174                return 0;
 175
 176        setup_clear_cpu_cap(X86_FEATURE_INVPCID);
 177        pr_info("noinvpcid: INVPCID feature disabled\n");
 178        return 0;
 179}
 180early_param("noinvpcid", x86_noinvpcid_setup);
 181
 182#ifdef CONFIG_X86_32
 183static int cachesize_override = -1;
 184static int disable_x86_serial_nr = 1;
 185
 186static int __init cachesize_setup(char *str)
 187{
 188        get_option(&str, &cachesize_override);
 189        return 1;
 190}
 191__setup("cachesize=", cachesize_setup);
 192
 193static int __init x86_sep_setup(char *s)
 194{
 195        setup_clear_cpu_cap(X86_FEATURE_SEP);
 196        return 1;
 197}
 198__setup("nosep", x86_sep_setup);
 199
 200/* Standard macro to see if a specific flag is changeable */
 201static inline int flag_is_changeable_p(u32 flag)
 202{
 203        u32 f1, f2;
 204
 205        /*
 206         * Cyrix and IDT cpus allow disabling of CPUID
 207         * so the code below may return different results
  208         * when it is executed before and after the CPUID is
  209         * enabled. Add "volatile" so that gcc does not optimize
  210         * out the subsequent calls to this function.
 211         */
 212        asm volatile ("pushfl           \n\t"
 213                      "pushfl           \n\t"
 214                      "popl %0          \n\t"
 215                      "movl %0, %1      \n\t"
 216                      "xorl %2, %0      \n\t"
 217                      "pushl %0         \n\t"
 218                      "popfl            \n\t"
 219                      "pushfl           \n\t"
 220                      "popl %0          \n\t"
 221                      "popfl            \n\t"
 222
 223                      : "=&r" (f1), "=&r" (f2)
 224                      : "ir" (flag));
 225
 226        return ((f1^f2) & flag) != 0;
 227}
 228
 229/* Probe for the CPUID instruction */
 230int have_cpuid_p(void)
 231{
 232        return flag_is_changeable_p(X86_EFLAGS_ID);
 233}
 234
 235static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 236{
 237        unsigned long lo, hi;
 238
 239        if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
 240                return;
 241
 242        /* Disable processor serial number: */
 243
 244        rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
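             /* Bit 21 of BBL_CR_CTL disables the processor serial number (PSN) feature */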
 245        lo |= 0x200000;
 246        wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 247
 248        pr_notice("CPU serial number disabled.\n");
 249        clear_cpu_cap(c, X86_FEATURE_PN);
 250
 251        /* Disabling the serial number may affect the cpuid level */
 252        c->cpuid_level = cpuid_eax(0);
 253}
 254
 255static int __init x86_serial_nr_setup(char *s)
 256{
 257        disable_x86_serial_nr = 0;
 258        return 1;
 259}
 260__setup("serialnumber", x86_serial_nr_setup);
 261#else
 262static inline int flag_is_changeable_p(u32 flag)
 263{
 264        return 1;
 265}
 266static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 267{
 268}
 269#endif
 270
 271static __init int setup_disable_smep(char *arg)
 272{
 273        setup_clear_cpu_cap(X86_FEATURE_SMEP);
 274        /* Check for things that depend on SMEP being enabled: */
 275        check_mpx_erratum(&boot_cpu_data);
 276        return 1;
 277}
 278__setup("nosmep", setup_disable_smep);
 279
 280static __always_inline void setup_smep(struct cpuinfo_x86 *c)
 281{
 282        if (cpu_has(c, X86_FEATURE_SMEP))
 283                cr4_set_bits(X86_CR4_SMEP);
 284}
 285
 286static __init int setup_disable_smap(char *arg)
 287{
 288        setup_clear_cpu_cap(X86_FEATURE_SMAP);
 289        return 1;
 290}
 291__setup("nosmap", setup_disable_smap);
 292
 293static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 294{
 295        unsigned long eflags = native_save_fl();
 296
 297        /* This should have been cleared long ago */
 298        BUG_ON(eflags & X86_EFLAGS_AC);
 299
 300        if (cpu_has(c, X86_FEATURE_SMAP)) {
 301#ifdef CONFIG_X86_SMAP
 302                cr4_set_bits(X86_CR4_SMAP);
 303#else
 304                cr4_clear_bits(X86_CR4_SMAP);
 305#endif
 306        }
 307}
 308
 309/*
 310 * Protection Keys are not available in 32-bit mode.
 311 */
 312static bool pku_disabled;
 313
 314static __always_inline void setup_pku(struct cpuinfo_x86 *c)
 315{
 316        /* check the boot processor, plus compile options for PKU: */
 317        if (!cpu_feature_enabled(X86_FEATURE_PKU))
 318                return;
 319        /* checks the actual processor's cpuid bits: */
 320        if (!cpu_has(c, X86_FEATURE_PKU))
 321                return;
 322        if (pku_disabled)
 323                return;
 324
 325        cr4_set_bits(X86_CR4_PKE);
 326        /*
  327         * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
 328         * cpuid bit to be set.  We need to ensure that we
 329         * update that bit in this CPU's "cpu_info".
 330         */
 331        get_cpu_cap(c);
 332}
 333
 334#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 335static __init int setup_disable_pku(char *arg)
 336{
 337        /*
 338         * Do not clear the X86_FEATURE_PKU bit.  All of the
 339         * runtime checks are against OSPKE so clearing the
 340         * bit does nothing.
 341         *
 342         * This way, we will see "pku" in cpuinfo, but not
 343         * "ospke", which is exactly what we want.  It shows
 344         * that the CPU has PKU, but the OS has not enabled it.
 345         * This happens to be exactly how a system would look
 346         * if we disabled the config option.
 347         */
 348        pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
 349        pku_disabled = true;
 350        return 1;
 351}
 352__setup("nopku", setup_disable_pku);
  353#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 354
 355/*
 356 * Some CPU features depend on higher CPUID levels, which may not always
 357 * be available due to CPUID level capping or broken virtualization
 358 * software.  Add those features to this table to auto-disable them.
 359 */
 360struct cpuid_dependent_feature {
 361        u32 feature;
 362        u32 level;
 363};
 364
 365static const struct cpuid_dependent_feature
 366cpuid_dependent_features[] = {
 367        { X86_FEATURE_MWAIT,            0x00000005 },
 368        { X86_FEATURE_DCA,              0x00000009 },
 369        { X86_FEATURE_XSAVE,            0x0000000d },
 370        { 0, 0 }
 371};
 372
 373static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 374{
 375        const struct cpuid_dependent_feature *df;
 376
 377        for (df = cpuid_dependent_features; df->feature; df++) {
 378
 379                if (!cpu_has(c, df->feature))
 380                        continue;
 381                /*
 382                 * Note: cpuid_level is set to -1 if unavailable, but
  383                 * extended_cpuid_level is set to 0 if unavailable
 384                 * and the legitimate extended levels are all negative
 385                 * when signed; hence the weird messing around with
 386                 * signs here...
 387                 */
 388                if (!((s32)df->level < 0 ?
 389                     (u32)df->level > (u32)c->extended_cpuid_level :
 390                     (s32)df->level > (s32)c->cpuid_level))
 391                        continue;
 392
 393                clear_cpu_cap(c, df->feature);
 394                if (!warn)
 395                        continue;
 396
 397                pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
 398                        x86_cap_flag(df->feature), df->level);
 399        }
 400}
 401
 402/*
 403 * Naming convention should be: <Name> [(<Codename>)]
  404 * This table is only used if init_<vendor>() below doesn't set it;
 405 * in particular, if CPUID levels 0x80000002..4 are supported, this
 406 * isn't used
 407 */
 408
 409/* Look up CPU names by table lookup. */
 410static const char *table_lookup_model(struct cpuinfo_x86 *c)
 411{
 412#ifdef CONFIG_X86_32
 413        const struct legacy_cpu_model_info *info;
 414
 415        if (c->x86_model >= 16)
 416                return NULL;    /* Range check */
 417
 418        if (!this_cpu)
 419                return NULL;
 420
 421        info = this_cpu->legacy_models;
 422
 423        while (info->family) {
 424                if (info->family == c->x86)
 425                        return info->model_names[c->x86_model];
 426                info++;
 427        }
 428#endif
 429        return NULL;            /* Not found */
 430}
 431
 432__u32 cpu_caps_cleared[NCAPINTS];
 433__u32 cpu_caps_set[NCAPINTS];
 434
 435void load_percpu_segment(int cpu)
 436{
 437#ifdef CONFIG_X86_32
 438        loadsegment(fs, __KERNEL_PERCPU);
 439#else
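             /* On 64-bit, per-cpu data is reached via the GS base, set with the MSR below */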
 440        __loadsegment_simple(gs, 0);
 441        wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
 442#endif
 443        load_stack_canary_segment();
 444}
 445
 446/*
 447 * Current gdt points %fs at the "master" per-cpu area: after this,
 448 * it's on the real one.
 449 */
 450void switch_to_new_gdt(int cpu)
 451{
 452        struct desc_ptr gdt_descr;
 453
 454        gdt_descr.address = (long)get_cpu_gdt_table(cpu);
 455        gdt_descr.size = GDT_SIZE - 1;
 456        load_gdt(&gdt_descr);
 457        /* Reload the per-cpu base */
 458
 459        load_percpu_segment(cpu);
 460}
 461
 462static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 463
 464static void get_model_name(struct cpuinfo_x86 *c)
 465{
 466        unsigned int *v;
 467        char *p, *q, *s;
 468
 469        if (c->extended_cpuid_level < 0x80000004)
 470                return;
 471
 472        v = (unsigned int *)c->x86_model_id;
 473        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
 474        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 475        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 476        c->x86_model_id[48] = 0;
 477
 478        /* Trim whitespace */
 479        p = q = s = &c->x86_model_id[0];
 480
 481        while (*p == ' ')
 482                p++;
 483
 484        while (*p) {
 485                /* Note the last non-whitespace index */
 486                if (!isspace(*p))
 487                        s = q;
 488
 489                *q++ = *p++;
 490        }
 491
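             /* Terminate right after the last non-whitespace character */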
 492        *(s + 1) = '\0';
 493}
 494
 495void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 496{
 497        unsigned int n, dummy, ebx, ecx, edx, l2size;
 498
 499        n = c->extended_cpuid_level;
 500
 501        if (n >= 0x80000005) {
 502                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
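                     /* CPUID 0x80000005: ECX[31:24] = L1D size in KB, EDX[31:24] = L1I size in KB */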
 503                c->x86_cache_size = (ecx>>24) + (edx>>24);
 504#ifdef CONFIG_X86_64
 505                /* On K8 L1 TLB is inclusive, so don't count it */
 506                c->x86_tlbsize = 0;
 507#endif
 508        }
 509
  510        if (n < 0x80000006)     /* Some chips just have a large L1. */
 511                return;
 512
 513        cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
 514        l2size = ecx >> 16;
 515
 516#ifdef CONFIG_X86_64
 517        c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
 518#else
 519        /* do processor-specific cache resizing */
 520        if (this_cpu->legacy_cache_size)
 521                l2size = this_cpu->legacy_cache_size(c, l2size);
 522
 523        /* Allow user to override all this if necessary. */
 524        if (cachesize_override != -1)
 525                l2size = cachesize_override;
 526
 527        if (l2size == 0)
 528                return;         /* Again, no L2 cache is possible */
 529#endif
 530
 531        c->x86_cache_size = l2size;
 532}
 533
 534u16 __read_mostly tlb_lli_4k[NR_INFO];
 535u16 __read_mostly tlb_lli_2m[NR_INFO];
 536u16 __read_mostly tlb_lli_4m[NR_INFO];
 537u16 __read_mostly tlb_lld_4k[NR_INFO];
 538u16 __read_mostly tlb_lld_2m[NR_INFO];
 539u16 __read_mostly tlb_lld_4m[NR_INFO];
 540u16 __read_mostly tlb_lld_1g[NR_INFO];
 541
 542static void cpu_detect_tlb(struct cpuinfo_x86 *c)
 543{
 544        if (this_cpu->c_detect_tlb)
 545                this_cpu->c_detect_tlb(c);
 546
 547        pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
 548                tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
 549                tlb_lli_4m[ENTRIES]);
 550
 551        pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
 552                tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
 553                tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
 554}
 555
 556void detect_ht(struct cpuinfo_x86 *c)
 557{
 558#ifdef CONFIG_SMP
 559        u32 eax, ebx, ecx, edx;
 560        int index_msb, core_bits;
 561        static bool printed;
 562
 563        if (!cpu_has(c, X86_FEATURE_HT))
 564                return;
 565
 566        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
 567                goto out;
 568
 569        if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
 570                return;
 571
 572        cpuid(1, &eax, &ebx, &ecx, &edx);
 573
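             /* CPUID.1:EBX[23:16] = maximum number of addressable logical CPUs in this package */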
 574        smp_num_siblings = (ebx & 0xff0000) >> 16;
 575
 576        if (smp_num_siblings == 1) {
 577                pr_info_once("CPU0: Hyper-Threading is disabled\n");
 578                goto out;
 579        }
 580
 581        if (smp_num_siblings <= 1)
 582                goto out;
 583
 584        index_msb = get_count_order(smp_num_siblings);
 585        c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 586
 587        smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 588
 589        index_msb = get_count_order(smp_num_siblings);
 590
 591        core_bits = get_count_order(c->x86_max_cores);
 592
 593        c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
 594                                       ((1 << core_bits) - 1);
 595
 596out:
 597        if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
 598                pr_info("CPU: Physical Processor ID: %d\n",
 599                        c->phys_proc_id);
 600                pr_info("CPU: Processor Core ID: %d\n",
 601                        c->cpu_core_id);
 602                printed = 1;
 603        }
 604#endif
 605}
 606
 607static void get_cpu_vendor(struct cpuinfo_x86 *c)
 608{
 609        char *v = c->x86_vendor_id;
 610        int i;
 611
 612        for (i = 0; i < X86_VENDOR_NUM; i++) {
 613                if (!cpu_devs[i])
 614                        break;
 615
 616                if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
 617                    (cpu_devs[i]->c_ident[1] &&
 618                     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
 619
 620                        this_cpu = cpu_devs[i];
 621                        c->x86_vendor = this_cpu->c_x86_vendor;
 622                        return;
 623                }
 624        }
 625
 626        pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
 627                    "CPU: Your system may be unstable.\n", v);
 628
 629        c->x86_vendor = X86_VENDOR_UNKNOWN;
 630        this_cpu = &default_cpu;
 631}
 632
 633void cpu_detect(struct cpuinfo_x86 *c)
 634{
 635        /* Get vendor name */
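             /* cpuid() takes (op, eax, ebx, ecx, edx), so the 12-byte vendor string is stored as EBX, EDX, ECX */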
 636        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
 637              (unsigned int *)&c->x86_vendor_id[0],
 638              (unsigned int *)&c->x86_vendor_id[8],
 639              (unsigned int *)&c->x86_vendor_id[4]);
 640
 641        c->x86 = 4;
 642        /* Intel-defined flags: level 0x00000001 */
 643        if (c->cpuid_level >= 0x00000001) {
 644                u32 junk, tfms, cap0, misc;
 645
 646                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 647                c->x86          = x86_family(tfms);
 648                c->x86_model    = x86_model(tfms);
 649                c->x86_mask     = x86_stepping(tfms);
 650
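                     /* CPUID.1:EDX bit 19 is CLFSH; EBX[15:8] gives the CLFLUSH line size in 8-byte chunks */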
 651                if (cap0 & (1<<19)) {
 652                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
 653                        c->x86_cache_alignment = c->x86_clflush_size;
 654                }
 655        }
 656}
 657
 658void get_cpu_cap(struct cpuinfo_x86 *c)
 659{
 660        u32 eax, ebx, ecx, edx;
 661
 662        /* Intel-defined flags: level 0x00000001 */
 663        if (c->cpuid_level >= 0x00000001) {
 664                cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
 665
 666                c->x86_capability[CPUID_1_ECX] = ecx;
 667                c->x86_capability[CPUID_1_EDX] = edx;
 668        }
 669
 670        /* Additional Intel-defined flags: level 0x00000007 */
 671        if (c->cpuid_level >= 0x00000007) {
 672                cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
 673
 674                c->x86_capability[CPUID_7_0_EBX] = ebx;
 675
 676                c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
 677                c->x86_capability[CPUID_7_ECX] = ecx;
 678        }
 679
 680        /* Extended state features: level 0x0000000d */
 681        if (c->cpuid_level >= 0x0000000d) {
 682                cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
 683
 684                c->x86_capability[CPUID_D_1_EAX] = eax;
 685        }
 686
 687        /* Additional Intel-defined flags: level 0x0000000F */
 688        if (c->cpuid_level >= 0x0000000F) {
 689
 690                /* QoS sub-leaf, EAX=0Fh, ECX=0 */
 691                cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
 692                c->x86_capability[CPUID_F_0_EDX] = edx;
 693
 694                if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
 695                        /* will be overridden if occupancy monitoring exists */
 696                        c->x86_cache_max_rmid = ebx;
 697
 698                        /* QoS sub-leaf, EAX=0Fh, ECX=1 */
 699                        cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
 700                        c->x86_capability[CPUID_F_1_EDX] = edx;
 701
 702                        if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
 703                              ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
 704                               (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
 705                                c->x86_cache_max_rmid = ecx;
 706                                c->x86_cache_occ_scale = ebx;
 707                        }
 708                } else {
 709                        c->x86_cache_max_rmid = -1;
 710                        c->x86_cache_occ_scale = -1;
 711                }
 712        }
 713
 714        /* AMD-defined flags: level 0x80000001 */
 715        eax = cpuid_eax(0x80000000);
 716        c->extended_cpuid_level = eax;
 717
 718        if ((eax & 0xffff0000) == 0x80000000) {
 719                if (eax >= 0x80000001) {
 720                        cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
 721
 722                        c->x86_capability[CPUID_8000_0001_ECX] = ecx;
 723                        c->x86_capability[CPUID_8000_0001_EDX] = edx;
 724                }
 725        }
 726
 727        if (c->extended_cpuid_level >= 0x80000007) {
 728                cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
 729
 730                c->x86_capability[CPUID_8000_0007_EBX] = ebx;
 731                c->x86_power = edx;
 732        }
 733
 734        if (c->extended_cpuid_level >= 0x80000008) {
 735                cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
 736
 737                c->x86_virt_bits = (eax >> 8) & 0xff;
 738                c->x86_phys_bits = eax & 0xff;
 739                c->x86_capability[CPUID_8000_0008_EBX] = ebx;
 740        }
 741#ifdef CONFIG_X86_32
 742        else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
 743                c->x86_phys_bits = 36;
 744#endif
 745
 746        if (c->extended_cpuid_level >= 0x8000000a)
 747                c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
 748
 749        init_scattered_cpuid_features(c);
 750}
 751
 752static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 753{
 754#ifdef CONFIG_X86_32
 755        int i;
 756
 757        /*
 758         * First of all, decide if this is a 486 or higher
 759         * It's a 486 if we can modify the AC flag
 760         */
 761        if (flag_is_changeable_p(X86_EFLAGS_AC))
 762                c->x86 = 4;
 763        else
 764                c->x86 = 3;
 765
 766        for (i = 0; i < X86_VENDOR_NUM; i++)
 767                if (cpu_devs[i] && cpu_devs[i]->c_identify) {
 768                        c->x86_vendor_id[0] = 0;
 769                        cpu_devs[i]->c_identify(c);
 770                        if (c->x86_vendor_id[0]) {
 771                                get_cpu_vendor(c);
 772                                break;
 773                        }
 774                }
 775#endif
 776}
 777
 778/*
 779 * Do minimum CPU detection early.
 780 * Fields really needed: vendor, cpuid_level, family, model, mask,
 781 * cache alignment.
 782 * The others are not touched to avoid unwanted side effects.
 783 *
 784 * WARNING: this function is only called on the BP.  Don't add code here
 785 * that is supposed to run on all CPUs.
 786 */
 787static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 788{
 789#ifdef CONFIG_X86_64
 790        c->x86_clflush_size = 64;
 791        c->x86_phys_bits = 36;
 792        c->x86_virt_bits = 48;
 793#else
 794        c->x86_clflush_size = 32;
 795        c->x86_phys_bits = 32;
 796        c->x86_virt_bits = 32;
 797#endif
 798        c->x86_cache_alignment = c->x86_clflush_size;
 799
 800        memset(&c->x86_capability, 0, sizeof c->x86_capability);
 801        c->extended_cpuid_level = 0;
 802
 803        if (!have_cpuid_p())
 804                identify_cpu_without_cpuid(c);
 805
  806        /* Cyrix could have CPUID enabled via c_identify() */
 807        if (!have_cpuid_p())
 808                return;
 809
 810        cpu_detect(c);
 811        get_cpu_vendor(c);
 812        get_cpu_cap(c);
 813
 814        if (this_cpu->c_early_init)
 815                this_cpu->c_early_init(c);
 816
 817        c->cpu_index = 0;
 818        filter_cpuid_features(c, false);
 819
 820        if (this_cpu->c_bsp_init)
 821                this_cpu->c_bsp_init(c);
 822
 823        setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 824        fpu__init_system(c);
 825}
 826
 827void __init early_cpu_init(void)
 828{
 829        const struct cpu_dev *const *cdev;
 830        int count = 0;
 831
 832#ifdef CONFIG_PROCESSOR_SELECT
 833        pr_info("KERNEL supported cpus:\n");
 834#endif
 835
 836        for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
 837                const struct cpu_dev *cpudev = *cdev;
 838
 839                if (count >= X86_VENDOR_NUM)
 840                        break;
 841                cpu_devs[count] = cpudev;
 842                count++;
 843
 844#ifdef CONFIG_PROCESSOR_SELECT
 845                {
 846                        unsigned int j;
 847
 848                        for (j = 0; j < 2; j++) {
 849                                if (!cpudev->c_ident[j])
 850                                        continue;
 851                                pr_info("  %s %s\n", cpudev->c_vendor,
 852                                        cpudev->c_ident[j]);
 853                        }
 854                }
 855#endif
 856        }
 857        early_identify_cpu(&boot_cpu_data);
 858}
 859
 860/*
 861 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 862 * unfortunately, that's not true in practice because of early VIA
 863 * chips and (more importantly) broken virtualizers that are not easy
 864 * to detect. In the latter case it doesn't even *fail* reliably, so
 865 * probing for it doesn't even work. Disable it completely on 32-bit
 866 * unless we can find a reliable way to detect all the broken cases.
 867 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 868 */
 869static void detect_nopl(struct cpuinfo_x86 *c)
 870{
 871#ifdef CONFIG_X86_32
 872        clear_cpu_cap(c, X86_FEATURE_NOPL);
 873#else
 874        set_cpu_cap(c, X86_FEATURE_NOPL);
 875#endif
 876}
 877
 878static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
 879{
 880#ifdef CONFIG_X86_64
 881        /*
 882         * Empirically, writing zero to a segment selector on AMD does
 883         * not clear the base, whereas writing zero to a segment
 884         * selector on Intel does clear the base.  Intel's behavior
 885         * allows slightly faster context switches in the common case
 886         * where GS is unused by the prev and next threads.
 887         *
 888         * Since neither vendor documents this anywhere that I can see,
 889         * detect it directly instead of hardcoding the choice by
 890         * vendor.
 891         *
 892         * I've designated AMD's behavior as the "bug" because it's
 893         * counterintuitive and less friendly.
 894         */
 895
 896        unsigned long old_base, tmp;
 897        rdmsrl(MSR_FS_BASE, old_base);
 898        wrmsrl(MSR_FS_BASE, 1);
 899        loadsegment(fs, 0);
 900        rdmsrl(MSR_FS_BASE, tmp);
 901        if (tmp != 0)
 902                set_cpu_bug(c, X86_BUG_NULL_SEG);
 903        wrmsrl(MSR_FS_BASE, old_base);
 904#endif
 905}
 906
 907static void generic_identify(struct cpuinfo_x86 *c)
 908{
 909        c->extended_cpuid_level = 0;
 910
 911        if (!have_cpuid_p())
 912                identify_cpu_without_cpuid(c);
 913
  914        /* Cyrix could have CPUID enabled via c_identify() */
 915        if (!have_cpuid_p())
 916                return;
 917
 918        cpu_detect(c);
 919
 920        get_cpu_vendor(c);
 921
 922        get_cpu_cap(c);
 923
 924        if (c->cpuid_level >= 0x00000001) {
 925                c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 926#ifdef CONFIG_X86_32
 927# ifdef CONFIG_SMP
 928                c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 929# else
 930                c->apicid = c->initial_apicid;
 931# endif
 932#endif
 933                c->phys_proc_id = c->initial_apicid;
 934        }
 935
 936        get_model_name(c); /* Default name */
 937
 938        detect_nopl(c);
 939
 940        detect_null_seg_behavior(c);
 941
 942        /*
 943         * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
 944         * systems that run Linux at CPL > 0 may or may not have the
 945         * issue, but, even if they have the issue, there's absolutely
 946         * nothing we can do about it because we can't use the real IRET
 947         * instruction.
 948         *
 949         * NB: For the time being, only 32-bit kernels support
 950         * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
 951         * whether to apply espfix using paravirt hooks.  If any
 952         * non-paravirt system ever shows up that does *not* have the
 953         * ESPFIX issue, we can change this.
 954         */
 955#ifdef CONFIG_X86_32
 956# ifdef CONFIG_PARAVIRT
 957        do {
 958                extern void native_iret(void);
 959                if (pv_cpu_ops.iret == native_iret)
 960                        set_cpu_bug(c, X86_BUG_ESPFIX);
 961        } while (0);
 962# else
 963        set_cpu_bug(c, X86_BUG_ESPFIX);
 964# endif
 965#endif
 966}
 967
 968static void x86_init_cache_qos(struct cpuinfo_x86 *c)
 969{
 970        /*
  971         * The heavy lifting of max_rmid and cache_occ_scale is handled
 972         * in get_cpu_cap().  Here we just set the max_rmid for the boot_cpu
 973         * in case CQM bits really aren't there in this CPU.
 974         */
 975        if (c != &boot_cpu_data) {
 976                boot_cpu_data.x86_cache_max_rmid =
 977                        min(boot_cpu_data.x86_cache_max_rmid,
 978                            c->x86_cache_max_rmid);
 979        }
 980}
 981
 982/*
 983 * This does the hard work of actually picking apart the CPU stuff...
 984 */
 985static void identify_cpu(struct cpuinfo_x86 *c)
 986{
 987        int i;
 988
 989        c->loops_per_jiffy = loops_per_jiffy;
 990        c->x86_cache_size = -1;
 991        c->x86_vendor = X86_VENDOR_UNKNOWN;
 992        c->x86_model = c->x86_mask = 0; /* So far unknown... */
 993        c->x86_vendor_id[0] = '\0'; /* Unset */
 994        c->x86_model_id[0] = '\0';  /* Unset */
 995        c->x86_max_cores = 1;
 996        c->x86_coreid_bits = 0;
 997#ifdef CONFIG_X86_64
 998        c->x86_clflush_size = 64;
 999        c->x86_phys_bits = 36;
1000        c->x86_virt_bits = 48;
1001#else
1002        c->cpuid_level = -1;    /* CPUID not detected */
1003        c->x86_clflush_size = 32;
1004        c->x86_phys_bits = 32;
1005        c->x86_virt_bits = 32;
1006#endif
1007        c->x86_cache_alignment = c->x86_clflush_size;
1008        memset(&c->x86_capability, 0, sizeof c->x86_capability);
1009
1010        generic_identify(c);
1011
1012        if (this_cpu->c_identify)
1013                this_cpu->c_identify(c);
1014
1015        /* Clear/Set all flags overridden by options, after probe */
1016        for (i = 0; i < NCAPINTS; i++) {
1017                c->x86_capability[i] &= ~cpu_caps_cleared[i];
1018                c->x86_capability[i] |= cpu_caps_set[i];
1019        }
1020
1021#ifdef CONFIG_X86_64
1022        c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1023#endif
1024
1025        /*
1026         * Vendor-specific initialization.  In this section we
 1027         * canonicalize the feature flags: if there are features a
 1028         * certain CPU supports which CPUID doesn't tell us about,
 1029         * if CPUID claims incorrect flags, or if there are other
 1030         * bugs, we handle them here.
1031         *
1032         * At the end of this section, c->x86_capability better
1033         * indicate the features this CPU genuinely supports!
1034         */
1035        if (this_cpu->c_init)
1036                this_cpu->c_init(c);
1037
1038        /* Disable the PN if appropriate */
1039        squash_the_stupid_serial_number(c);
1040
1041        /* Set up SMEP/SMAP */
1042        setup_smep(c);
1043        setup_smap(c);
1044
1045        /*
1046         * The vendor-specific functions might have changed features.
1047         * Now we do "generic changes."
1048         */
1049
1050        /* Filter out anything that depends on CPUID levels we don't have */
1051        filter_cpuid_features(c, true);
1052
1053        /* If the model name is still unset, do table lookup. */
1054        if (!c->x86_model_id[0]) {
1055                const char *p;
1056                p = table_lookup_model(c);
1057                if (p)
1058                        strcpy(c->x86_model_id, p);
1059                else
1060                        /* Last resort... */
1061                        sprintf(c->x86_model_id, "%02x/%02x",
1062                                c->x86, c->x86_model);
1063        }
1064
1065#ifdef CONFIG_X86_64
1066        detect_ht(c);
1067#endif
1068
1069        init_hypervisor(c);
1070        x86_init_rdrand(c);
1071        x86_init_cache_qos(c);
1072        setup_pku(c);
1073
1074        /*
 1075         * Clear/Set all flags overridden by options; this needs to be
 1076         * done before the SMP all-CPUs capability AND below.
1077         */
1078        for (i = 0; i < NCAPINTS; i++) {
1079                c->x86_capability[i] &= ~cpu_caps_cleared[i];
1080                c->x86_capability[i] |= cpu_caps_set[i];
1081        }
1082
1083        /*
1084         * On SMP, boot_cpu_data holds the common feature set between
1085         * all CPUs; so make sure that we indicate which features are
1086         * common between the CPUs.  The first time this routine gets
1087         * executed, c == &boot_cpu_data.
1088         */
1089        if (c != &boot_cpu_data) {
1090                /* AND the already accumulated flags with these */
1091                for (i = 0; i < NCAPINTS; i++)
1092                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1093
1094                /* OR, i.e. replicate the bug flags */
1095                for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
1096                        c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
1097        }
1098
1099        /* Init Machine Check Exception if available. */
1100        mcheck_cpu_init(c);
1101
1102        select_idle_routine(c);
1103
1104#ifdef CONFIG_NUMA
1105        numa_add_cpu(smp_processor_id());
1106#endif
 1107        /* The boot/hotplug time assignment got cleared, restore it */
1108        c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id);
1109}
1110
1111/*
1112 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
1113 * on 32-bit kernels:
1114 */
1115#ifdef CONFIG_X86_32
1116void enable_sep_cpu(void)
1117{
1118        struct tss_struct *tss;
1119        int cpu;
1120
1121        if (!boot_cpu_has(X86_FEATURE_SEP))
1122                return;
1123
1124        cpu = get_cpu();
1125        tss = &per_cpu(cpu_tss, cpu);
1126
1127        /*
1128         * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
1129         * see the big comment in struct x86_hw_tss's definition.
1130         */
1131
1132        tss->x86_tss.ss1 = __KERNEL_CS;
1133        wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
1134
1135        wrmsr(MSR_IA32_SYSENTER_ESP,
1136              (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
1137              0);
1138
1139        wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
1140
1141        put_cpu();
1142}
1143#endif
1144
1145void __init identify_boot_cpu(void)
1146{
1147        identify_cpu(&boot_cpu_data);
1148        init_amd_e400_c1e_mask();
1149#ifdef CONFIG_X86_32
1150        sysenter_setup();
1151        enable_sep_cpu();
1152#endif
1153        cpu_detect_tlb(&boot_cpu_data);
1154}
1155
1156void identify_secondary_cpu(struct cpuinfo_x86 *c)
1157{
1158        BUG_ON(c == &boot_cpu_data);
1159        identify_cpu(c);
1160#ifdef CONFIG_X86_32
1161        enable_sep_cpu();
1162#endif
1163        mtrr_ap_init();
1164}
1165
1166struct msr_range {
1167        unsigned        min;
1168        unsigned        max;
1169};
1170
1171static const struct msr_range msr_range_array[] = {
1172        { 0x00000000, 0x00000418},
1173        { 0xc0000000, 0xc000040b},
1174        { 0xc0010000, 0xc0010142},
1175        { 0xc0011000, 0xc001103b},
1176};
1177
1178static void __print_cpu_msr(void)
1179{
1180        unsigned index_min, index_max;
1181        unsigned index;
1182        u64 val;
1183        int i;
1184
1185        for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
1186                index_min = msr_range_array[i].min;
1187                index_max = msr_range_array[i].max;
1188
1189                for (index = index_min; index < index_max; index++) {
1190                        if (rdmsrl_safe(index, &val))
1191                                continue;
1192                        pr_info(" MSR%08x: %016llx\n", index, val);
1193                }
1194        }
1195}
1196
1197static int show_msr;
1198
1199static __init int setup_show_msr(char *arg)
1200{
1201        int num;
1202
1203        get_option(&arg, &num);
1204
1205        if (num > 0)
1206                show_msr = num;
1207        return 1;
1208}
1209__setup("show_msr=", setup_show_msr);
1210
1211static __init int setup_noclflush(char *arg)
1212{
1213        setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
1214        setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
1215        return 1;
1216}
1217__setup("noclflush", setup_noclflush);
1218
1219void print_cpu_info(struct cpuinfo_x86 *c)
1220{
1221        const char *vendor = NULL;
1222
1223        if (c->x86_vendor < X86_VENDOR_NUM) {
1224                vendor = this_cpu->c_vendor;
1225        } else {
1226                if (c->cpuid_level >= 0)
1227                        vendor = c->x86_vendor_id;
1228        }
1229
1230        if (vendor && !strstr(c->x86_model_id, vendor))
1231                pr_cont("%s ", vendor);
1232
1233        if (c->x86_model_id[0])
1234                pr_cont("%s", c->x86_model_id);
1235        else
1236                pr_cont("%d86", c->x86);
1237
1238        pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
1239
1240        if (c->x86_mask || c->cpuid_level >= 0)
1241                pr_cont(", stepping: 0x%x)\n", c->x86_mask);
1242        else
1243                pr_cont(")\n");
1244
1245        print_cpu_msr(c);
1246}
1247
1248void print_cpu_msr(struct cpuinfo_x86 *c)
1249{
1250        if (c->cpu_index < show_msr)
1251                __print_cpu_msr();
1252}
1253
1254static __init int setup_disablecpuid(char *arg)
1255{
1256        int bit;
1257
1258        if (get_option(&arg, &bit) && bit < NCAPINTS*32)
1259                setup_clear_cpu_cap(bit);
1260        else
1261                return 0;
1262
1263        return 1;
1264}
1265__setup("clearcpuid=", setup_disablecpuid);
1266
1267#ifdef CONFIG_X86_64
1268struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
1269struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
1270                                    (unsigned long) debug_idt_table };
1271
1272DEFINE_PER_CPU_FIRST(union irq_stack_union,
1273                     irq_stack_union) __aligned(PAGE_SIZE) __visible;
1274
1275/*
1276 * The following percpu variables are hot.  Align current_task to
1277 * cacheline size such that they fall in the same cacheline.
1278 */
1279DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
1280        &init_task;
1281EXPORT_PER_CPU_SYMBOL(current_task);
1282
1283DEFINE_PER_CPU(char *, irq_stack_ptr) =
1284        init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
1285
1286DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
1287
1288DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1289EXPORT_PER_CPU_SYMBOL(__preempt_count);
1290
1291/*
1292 * Special IST stacks which the CPU switches to when it calls
1293 * an IST-marked descriptor entry. Up to 7 stacks (hardware
1294 * limit), all of them are 4K, except the debug stack which
1295 * is 8K.
1296 */
1297static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
1298          [0 ... N_EXCEPTION_STACKS - 1]        = EXCEPTION_STKSZ,
1299          [DEBUG_STACK - 1]                     = DEBUG_STKSZ
1300};
1301
1302static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
1303        [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
1304
1305/* May not be marked __init: used by software suspend */
1306void syscall_init(void)
1307{
1308        /*
1309         * LSTAR and STAR live in a bit strange symbiosis.
 1310         * They both write to the same internal register. STAR allows
 1311         * setting CS/DS, but only a 32-bit target; LSTAR sets the 64-bit rip.
1312         */
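             /* High dword of STAR: bits 63:48 = user CS base for SYSRET, bits 47:32 = kernel CS for SYSCALL */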
1313        wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
1314        wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
1315
1316#ifdef CONFIG_IA32_EMULATION
1317        wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
1318        /*
1319         * This only works on Intel CPUs.
 1320         * On AMD CPUs these MSRs are 32-bit, so the CPU truncates MSR_IA32_SYSENTER_EIP.
1321         * This does not cause SYSENTER to jump to the wrong location, because
1322         * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
1323         */
1324        wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
1325        wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
1326        wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
1327#else
1328        wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
1329        wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
1330        wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
1331        wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
1332#endif
1333
1334        /* Flags to clear on syscall */
1335        wrmsrl(MSR_SYSCALL_MASK,
1336               X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
1337               X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
1338}
1339
1340/*
1341 * Copies of the original ist values from the tss are only accessed during
1342 * debugging, no special alignment required.
1343 */
1344DEFINE_PER_CPU(struct orig_ist, orig_ist);
1345
1346static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
1347DEFINE_PER_CPU(int, debug_stack_usage);
1348
1349int is_debug_stack(unsigned long addr)
1350{
1351        return __this_cpu_read(debug_stack_usage) ||
1352                (addr <= __this_cpu_read(debug_stack_addr) &&
1353                 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
1354}
1355NOKPROBE_SYMBOL(is_debug_stack);
1356
1357DEFINE_PER_CPU(u32, debug_idt_ctr);
1358
1359void debug_stack_set_zero(void)
1360{
1361        this_cpu_inc(debug_idt_ctr);
1362        load_current_idt();
1363}
1364NOKPROBE_SYMBOL(debug_stack_set_zero);
1365
1366void debug_stack_reset(void)
1367{
1368        if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
1369                return;
1370        if (this_cpu_dec_return(debug_idt_ctr) == 0)
1371                load_current_idt();
1372}
1373NOKPROBE_SYMBOL(debug_stack_reset);
1374
1375#else   /* CONFIG_X86_64 */
1376
1377DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
1378EXPORT_PER_CPU_SYMBOL(current_task);
1379DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1380EXPORT_PER_CPU_SYMBOL(__preempt_count);
1381
1382/*
1383 * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
1384 * the top of the kernel stack.  Use an extra percpu variable to track the
1385 * top of the kernel stack directly.
1386 */
1387DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
1388        (unsigned long)&init_thread_union + THREAD_SIZE;
1389EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
1390
1391#ifdef CONFIG_CC_STACKPROTECTOR
1392DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
1393#endif
1394
1395#endif  /* CONFIG_X86_64 */
1396
1397/*
1398 * Clear all 6 debug registers:
1399 */
1400static void clear_all_debug_regs(void)
1401{
1402        int i;
1403
1404        for (i = 0; i < 8; i++) {
1405                /* Ignore db4, db5 */
1406                if ((i == 4) || (i == 5))
1407                        continue;
1408
1409                set_debugreg(0, i);
1410        }
1411}
1412
1413#ifdef CONFIG_KGDB
1414/*
1415 * Restore debug regs if using kgdbwait and you have a kernel debugger
1416 * connection established.
1417 */
1418static void dbg_restore_debug_regs(void)
1419{
1420        if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
1421                arch_kgdb_ops.correct_hw_break();
1422}
1423#else /* ! CONFIG_KGDB */
1424#define dbg_restore_debug_regs()
1425#endif /* ! CONFIG_KGDB */
1426
1427static void wait_for_master_cpu(int cpu)
1428{
1429#ifdef CONFIG_SMP
1430        /*
1431         * wait for ACK from master CPU before continuing
1432         * with AP initialization
1433         */
1434        WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
1435        while (!cpumask_test_cpu(cpu, cpu_callout_mask))
1436                cpu_relax();
1437#endif
1438}
1439
1440/*
1441 * cpu_init() initializes state that is per-CPU. Some data is already
1442 * initialized (naturally) in the bootstrap process, such as the GDT
1443 * and IDT. We reload them nevertheless, this function acts as a
1444 * 'CPU state barrier', nothing should get across.
 1445 * A lot of state is already set up in PDA init for 64-bit.
1446 */
1447#ifdef CONFIG_X86_64
1448
1449void cpu_init(void)
1450{
1451        struct orig_ist *oist;
1452        struct task_struct *me;
1453        struct tss_struct *t;
1454        unsigned long v;
1455        int cpu = stack_smp_processor_id();
1456        int i;
1457
1458        wait_for_master_cpu(cpu);
1459
1460        /*
1461         * Initialize the CR4 shadow before doing anything that could
1462         * try to read it.
1463         */
1464        cr4_init_shadow();
1465
1466        /*
 1467         * Load microcode on this CPU if valid microcode is available.
 1468         * This is the early microcode loading procedure.
1469         */
1470        load_ucode_ap();
1471
1472        t = &per_cpu(cpu_tss, cpu);
1473        oist = &per_cpu(orig_ist, cpu);
1474
1475#ifdef CONFIG_NUMA
1476        if (this_cpu_read(numa_node) == 0 &&
1477            early_cpu_to_node(cpu) != NUMA_NO_NODE)
1478                set_numa_node(early_cpu_to_node(cpu));
1479#endif
1480
1481        me = current;
1482
1483        pr_debug("Initializing CPU#%d\n", cpu);
1484
1485        cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1486
1487        /*
1488         * Initialize the per-CPU GDT with the boot GDT,
1489         * and set up the GDT descriptor:
1490         */
1491
1492        switch_to_new_gdt(cpu);
1493        loadsegment(fs, 0);
1494
1495        load_current_idt();
1496
1497        memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
1498        syscall_init();
1499
1500        wrmsrl(MSR_FS_BASE, 0);
1501        wrmsrl(MSR_KERNEL_GS_BASE, 0);
1502        barrier();
1503
1504        x86_configure_nx();
1505        x2apic_setup();
1506
1507        /*
1508         * set up and load the per-CPU TSS
1509         */
1510        if (!oist->ist[0]) {
1511                char *estacks = per_cpu(exception_stacks, cpu);
1512
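                     /* estacks is advanced past each stack before being stored, so ist[v] points at the stack's top (stacks grow down) */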
1513                for (v = 0; v < N_EXCEPTION_STACKS; v++) {
1514                        estacks += exception_stack_sizes[v];
1515                        oist->ist[v] = t->x86_tss.ist[v] =
1516                                        (unsigned long)estacks;
1517                        if (v == DEBUG_STACK-1)
1518                                per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
1519                }
1520        }
1521
1522        t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
1523
1524        /*
1525         * <= is required because the CPU will access up to
1526         * 8 bits beyond the end of the IO permission bitmap.
1527         */
1528        for (i = 0; i <= IO_BITMAP_LONGS; i++)
1529                t->io_bitmap[i] = ~0UL;
1530
1531        atomic_inc(&init_mm.mm_count);
1532        me->active_mm = &init_mm;
1533        BUG_ON(me->mm);
1534        enter_lazy_tlb(&init_mm, me);
1535
1536        load_sp0(t, &current->thread);
1537        set_tss_desc(cpu, t);
1538        load_TR_desc();
1539        load_mm_ldt(&init_mm);
1540
1541        clear_all_debug_regs();
1542        dbg_restore_debug_regs();
1543
1544        fpu__init_cpu();
1545
1546        if (is_uv_system())
1547                uv_cpu_init();
1548}
1549
1550#else
1551
1552void cpu_init(void)
1553{
1554        int cpu = smp_processor_id();
1555        struct task_struct *curr = current;
1556        struct tss_struct *t = &per_cpu(cpu_tss, cpu);
1557        struct thread_struct *thread = &curr->thread;
1558
1559        wait_for_master_cpu(cpu);
1560
1561        /*
1562         * Initialize the CR4 shadow before doing anything that could
1563         * try to read it.
1564         */
1565        cr4_init_shadow();
1566
1567        show_ucode_info_early();
1568
1569        pr_info("Initializing CPU#%d\n", cpu);
1570
1571        if (cpu_feature_enabled(X86_FEATURE_VME) ||
1572            boot_cpu_has(X86_FEATURE_TSC) ||
1573            boot_cpu_has(X86_FEATURE_DE))
1574                cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1575
1576        load_current_idt();
1577        switch_to_new_gdt(cpu);
1578
1579        /*
1580         * Set up and load the per-CPU TSS and LDT
1581         */
1582        atomic_inc(&init_mm.mm_count);
1583        curr->active_mm = &init_mm;
1584        BUG_ON(curr->mm);
1585        enter_lazy_tlb(&init_mm, curr);
1586
1587        load_sp0(t, thread);
1588        set_tss_desc(cpu, t);
1589        load_TR_desc();
1590        load_mm_ldt(&init_mm);
1591
1592        t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
1593
1594#ifdef CONFIG_DOUBLEFAULT
1595        /* Set up doublefault TSS pointer in the GDT */
1596        __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
1597#endif
1598
1599        clear_all_debug_regs();
1600        dbg_restore_debug_regs();
1601
1602        fpu__init_cpu();
1603}
1604#endif
1605
1606static void bsp_resume(void)
1607{
1608        if (this_cpu->c_bsp_resume)
1609                this_cpu->c_bsp_resume(&boot_cpu_data);
1610}
1611
1612static struct syscore_ops cpu_syscore_ops = {
1613        .resume         = bsp_resume,
1614};
1615
1616static int __init init_cpu_syscore(void)
1617{
1618        register_syscore_ops(&cpu_syscore_ops);
1619        return 0;
1620}
1621core_initcall(init_cpu_syscore);
1622