linux/arch/x86/kernel/cpu/intel.c
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
        u64 misc_enable;

        /* Unmask CPUID levels if masked: */
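        /*
         * Firmware may have set MISC_ENABLE bit 22 ("Limit CPUID Maxval"),
         * which caps the reported CPUID level at 3; clearing it below
         * re-exposes the full CPUID range.
         */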
        if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
                rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

                if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
                        misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
                        wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                        c->cpuid_level = cpuid_eax(0);
                        get_cpu_cap(c);
                }
        }

        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
                (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

        if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
                unsigned lower_word;

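                /*
                 * SDM procedure for reading the microcode revision:
                 * write 0 to the MSR, serialize with CPUID (sync_core()),
                 * then read it back; the revision is in the high dword.
                 */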
                wrmsr(MSR_IA32_UCODE_REV, 0, 0);
                /* Required by the SDM */
                sync_core();
                rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
        }

        /*
         * Atom erratum AAE44/AAF40/AAG38/AAH41:
         *
         * A race condition between speculative fetches and invalidating
         * a large page.  This is worked around in microcode, but we
         * need the microcode to have already been loaded... so if it is
         * not, recommend a BIOS update and disable large pages.
         */
        if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
            c->microcode < 0x20e) {
                printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
                clear_cpu_cap(c, X86_FEATURE_PSE);
        }

#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
        /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
        if (c->x86 == 15 && c->x86_cache_alignment == 64)
                c->x86_cache_alignment = 128;
#endif

        /* CPUID workaround for 0F33/0F34 CPU */
        if (c->x86 == 0xF && c->x86_model == 0x3
            && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
                c->x86_phys_bits = 36;

        /*
         * c->x86_power is CPUID 8000_0007 EDX. Bit 8 indicates that the
         * TSC runs at a constant rate across P- and T-state transitions
         * and does not stop in deep C-states.
         *
         * It is also reliable across cores and sockets (but not across
         * cabinets - we turn it off in that case explicitly).
         */
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
                if (!check_tsc_unstable())
                        sched_clock_stable = 1;
        }

        /* Penwell and Cloverview have a TSC that keeps running in S3. */
        if (c->x86 == 6) {
                switch (c->x86_model) {
                case 0x27:      /* Penwell */
                case 0x35:      /* Cloverview */
                        set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
                        break;
                default:
                        break;
                }
        }

        /*
         * There is a known erratum on Pentium III and Core Solo
         * and Core Duo CPUs:
         * " Page with PAT set to WC while associated MTRR is UC
         *   may consolidate to UC "
         * Because of this erratum, it is better to stick with
         * setting WC via MTRR rather than using PAT on these CPUs.
         *
         * Enable PAT WC only on P4, Core 2 or later CPUs.
         */
        if (c->x86 == 6 && c->x86_model < 15)
                clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
        /*
         * P4s have a "fast strings" feature which causes single-
         * stepping REP instructions to only generate a #DB on
         * cache-line boundaries.
         *
         * Ingo Molnar reported a Pentium D (model 6) and a Xeon
         * (model 2) with the same problem.
         */
        if (c->x86 == 15) {
                rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

                if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
                        printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

                        misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
                        wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                }
        }
#endif

        /*
         * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
         * clear the fast string and enhanced fast string CPU capabilities.
         */
        if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
                rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
                        printk(KERN_INFO "Disabled fast string operations\n");
                        setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
                        setup_clear_cpu_cap(X86_FEATURE_ERMS);
                }
        }
}

#ifdef CONFIG_X86_32
/*
 *      Early probe support logic for ppro memory erratum #50
 *
 *      This is called before we do CPU identification work
 */

int __cpuinit ppro_with_ram_bug(void)
{
        /* Uses data from early_cpu_detect now */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
            boot_cpu_data.x86_mask < 8) {
                printk(KERN_INFO "Pentium Pro with Erratum #50 detected. Taking evasive action.\n");
                return 1;
        }
        return 0;
}

static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
{
        /* Were we called from identify_secondary_cpu()? */
        if (!c->cpu_index)
                return;

        /*
         * Mask B, Pentium, but not Pentium MMX
         */
        if (c->x86 == 5 &&
            c->x86_mask >= 1 && c->x86_mask <= 4 &&
            c->x86_model <= 3) {
                /*
                 * Remember we have B step Pentia with bugs
                 */
                WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
                                    "with B stepping processors.\n");
        }
}

static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
        unsigned long lo, hi;

#ifdef CONFIG_X86_F00F_BUG
        /*
         * All current models of Pentium and Pentium with MMX technology CPUs
         * have the F0 0F bug, which lets nonprivileged users lock up the
         * system. Announce that the fault handler will be checking for it.
         */
        clear_cpu_bug(c, X86_BUG_F00F);
        if (!paravirt_enabled() && c->x86 == 5) {
                static int f00f_workaround_enabled;

                set_cpu_bug(c, X86_BUG_F00F);
                if (!f00f_workaround_enabled) {
                        printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
                        f00f_workaround_enabled = 1;
                }
        }
#endif

        /*
         * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
         * model 3 mask 3
         */
        if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
                clear_cpu_cap(c, X86_FEATURE_SEP);

        /*
         * P4 Xeon errata 037 workaround.
         * Hardware prefetcher may cause stale data to be loaded into the cache.
         */
        if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
                rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
                if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
                        printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
                        printk(KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
                        lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
                        wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
                }
        }

        /*
         * See if we have a good local APIC by checking for buggy Pentia,
         * i.e. all B steppings and the C2 stepping of P54C when using their
         * integrated APIC (see 11AP erratum in "Pentium Processor
         * Specification Update").
         */
        if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
            (c->x86_mask < 0x6 || c->x86_mask == 0xb))
                set_cpu_cap(c, X86_FEATURE_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
        /*
         * Set up the preferred alignment for movsl bulk memory moves
         */
        switch (c->x86) {
        case 4:         /* 486: untested */
                break;
        case 5:         /* Old Pentia: untested */
                break;
        case 6:         /* PII/PIII only like movsl with 8-byte alignment */
                movsl_mask.mask = 7;
                break;
        case 15:        /* P4 is OK down to 8-byte alignment */
                movsl_mask.mask = 7;
                break;
        }
#endif

#ifdef CONFIG_X86_NUMAQ
        numaq_tsc_disable();
#endif

        intel_smp_check(c);
}
#else
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();

        /* Don't do the funky fallback heuristics the AMD version employs
           for now. */
        node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE || !node_online(node)) {
                /* reuse the value from init_cpu_to_node() */
                node = cpu_to_node(cpu);
        }
        numa_set_node(cpu, node);
#endif
}

/*
 * Find out the number of processor cores on the die.
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, ebx, ecx, edx;

        if (c->cpuid_level < 4)
                return 1;

        /* Intel has a non-standard dependency on %ecx for this CPUID level. */
        cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
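        /*
         * Leaf 4 EAX[4:0] is the cache type (0 means no more caches, i.e.
         * the leaf is invalid); EAX[31:26] is the maximum number of
         * addressable cores per package, minus one.
         */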
        if (eax & 0x1f)
                return (eax >> 26) + 1;
        else
                return 1;
}

static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
        /* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW    0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI          0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS      0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC    0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT          0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID         0x00000020

        u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

        clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
        clear_cpu_cap(c, X86_FEATURE_VNMI);
        clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
        clear_cpu_cap(c, X86_FEATURE_EPT);
        clear_cpu_cap(c, X86_FEATURE_VPID);

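        /*
         * In the VMX capability MSRs, the high dword reports the
         * "allowed-1" control settings; OR-ing the two halves tells us
         * which controls this CPU can enable at all.
         */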
        rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
        msr_ctl = vmx_msr_high | vmx_msr_low;
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
                set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
                set_cpu_cap(c, X86_FEATURE_VNMI);
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
                rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
                      vmx_msr_low, vmx_msr_high);
                msr_ctl2 = vmx_msr_high | vmx_msr_low;
                if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
                    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
                        set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
                        set_cpu_cap(c, X86_FEATURE_EPT);
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
                        set_cpu_cap(c, X86_FEATURE_VPID);
        }
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
        unsigned int l2 = 0;

        early_init_intel(c);

        intel_workarounds(c);

        /*
         * Detect the extended topology information if available. This
         * will reinitialise the initial_apicid which will be used
         * in init_intel_cacheinfo()
         */
        detect_extended_topology(c);

        l2 = init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                /* CPUID.0xA: EAX[7:0] is the perfmon version, EAX[15:8] the number of counters. */
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
        }

        if (cpu_has_xmm2)
                set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
        if (cpu_has_ds) {
                unsigned int l1;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
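                /* MISC_ENABLE bit 11 set = BTS unavailable, bit 12 set = PEBS unavailable. */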
                if (!(l1 & (1<<11)))
                        set_cpu_cap(c, X86_FEATURE_BTS);
                if (!(l1 & (1<<12)))
                        set_cpu_cap(c, X86_FEATURE_PEBS);
        }

        if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
                set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);

#ifdef CONFIG_X86_64
        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
        /*
         * Names for the Pentium II/Celeron processors
         * detectable only by also checking the cache size.
         * Dixon is NOT a Celeron.
         */
        if (c->x86 == 6) {
                char *p = NULL;

                switch (c->x86_model) {
                case 5:
                        if (l2 == 0)
                                p = "Celeron (Covington)";
                        else if (l2 == 256)
                                p = "Mobile Pentium II (Dixon)";
                        break;

                case 6:
                        if (l2 == 128)
                                p = "Celeron (Mendocino)";
                        else if (c->x86_mask == 0 || c->x86_mask == 5)
                                p = "Celeron-A";
                        break;

                case 8:
                        if (l2 == 128)
                                p = "Celeron (Coppermine)";
                        break;
                }

                if (p)
                        strcpy(c->x86_model_id, p);
        }

        if (c->x86 == 15)
                set_cpu_cap(c, X86_FEATURE_P4);
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_P3);
#endif

        if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
                /*
                 * Use the legacy CPUID leaves 0x1 and 0x4 for topology
                 * detection.
                 */
                c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
                detect_ht(c);
#endif
        }

        /* Work around errata */
        srat_detect_node(c);

        if (cpu_has(c, X86_FEATURE_VMX))
                detect_vmx_virtcap(c);

        /*
         * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not.
         * x86_energy_perf_policy(8) is available to change it at run-time.
         */
        if (cpu_has(c, X86_FEATURE_EPB)) {
                u64 epb;

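                /* EPB[3:0]: 0 = performance, 6 = normal, 15 = powersave. */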
                rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
                if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
                        printk_once(KERN_WARNING "ENERGY_PERF_BIAS:"
                                " Set to 'normal', was 'performance'\n"
                                "ENERGY_PERF_BIAS: View and update with"
                                " x86_energy_perf_policy(8)\n");
                        epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
                        wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
                }
        }
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /*
         * Intel PIII Tualatin. This comes in two flavours.
         * One has 256 KB of cache, the other 512 KB. We have no way
         * to determine which, so we use a boot-time override
         * for the 512 KB model, and assume 256 KB otherwise.
         */
        if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
                size = 256;
        return size;
}
#endif

#define TLB_INST_4K     0x01
#define TLB_INST_4M     0x02
#define TLB_INST_2M_4M  0x03

#define TLB_INST_ALL    0x05
#define TLB_INST_1G     0x06

#define TLB_DATA_4K     0x11
#define TLB_DATA_4M     0x12
#define TLB_DATA_2M_4M  0x13
#define TLB_DATA_4K_4M  0x14

#define TLB_DATA_1G     0x16

#define TLB_DATA0_4K    0x21
#define TLB_DATA0_4M    0x22
#define TLB_DATA0_2M_4M 0x23

#define STLB_4K         0x41

static const struct _tlb_table intel_tlb_table[] __cpuinitconst = {
        { 0x01, TLB_INST_4K,            32,     " TLB_INST 4 KByte pages, 4-way set associative" },
        { 0x02, TLB_INST_4M,            2,      " TLB_INST 4 MByte pages, fully associative" },
        { 0x03, TLB_DATA_4K,            64,     " TLB_DATA 4 KByte pages, 4-way set associative" },
        { 0x04, TLB_DATA_4M,            8,      " TLB_DATA 4 MByte pages, 4-way set associative" },
        { 0x05, TLB_DATA_4M,            32,     " TLB_DATA 4 MByte pages, 4-way set associative" },
        { 0x0b, TLB_INST_4M,            4,      " TLB_INST 4 MByte pages, 4-way set associative" },
        { 0x4f, TLB_INST_4K,            32,     " TLB_INST 4 KByte pages" },
        { 0x50, TLB_INST_ALL,           64,     " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x51, TLB_INST_ALL,           128,    " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x52, TLB_INST_ALL,           256,    " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x55, TLB_INST_2M_4M,         7,      " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
        { 0x56, TLB_DATA0_4M,           16,     " TLB_DATA0 4 MByte pages, 4-way set associative" },
        { 0x57, TLB_DATA0_4K,           16,     " TLB_DATA0 4 KByte pages, 4-way associative" },
        { 0x59, TLB_DATA0_4K,           16,     " TLB_DATA0 4 KByte pages, fully associative" },
        { 0x5a, TLB_DATA0_2M_4M,        32,     " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
        { 0x5b, TLB_DATA_4K_4M,         64,     " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0x5c, TLB_DATA_4K_4M,         128,    " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0x5d, TLB_DATA_4K_4M,         256,    " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0xb0, TLB_INST_4K,            128,    " TLB_INST 4 KByte pages, 4-way set associative" },
        { 0xb1, TLB_INST_2M_4M,         4,      " TLB_INST 2 MByte pages, 4-way, 8 entries or 4 MByte pages, 4-way, 4 entries" },
        { 0xb2, TLB_INST_4K,            64,     " TLB_INST 4 KByte pages, 4-way set associative" },
        { 0xb3, TLB_DATA_4K,            128,    " TLB_DATA 4 KByte pages, 4-way set associative" },
        { 0xb4, TLB_DATA_4K,            256,    " TLB_DATA 4 KByte pages, 4-way associative" },
        { 0xba, TLB_DATA_4K,            64,     " TLB_DATA 4 KByte pages, 4-way associative" },
        { 0xc0, TLB_DATA_4K_4M,         8,      " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
        { 0xca, STLB_4K,                512,    " STLB 4 KByte pages, 4-way associative" },
        { 0x00, 0, 0 }
};

static void __cpuinit intel_tlb_lookup(const unsigned char desc)
{
        unsigned char k;

        if (desc == 0)
                return;

        /* look up this descriptor in the table */
        for (k = 0; intel_tlb_table[k].descriptor != desc &&
                        intel_tlb_table[k].descriptor != 0; k++)
                ;

        if (intel_tlb_table[k].tlb_type == 0)
                return;

        switch (intel_tlb_table[k].tlb_type) {
        case STLB_4K:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_ALL:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_4K:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_4M:
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_2M_4M:
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4K:
        case TLB_DATA0_4K:
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4M:
        case TLB_DATA0_4M:
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_2M_4M:
        case TLB_DATA0_2M_4M:
                if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4K_4M:
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        }
}

static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
{
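        /*
         * tlb_flushall_shift tunes flush_tlb_mm_range()'s balance point
         * between per-page INVLPG and a full TLB flush; -1 means always
         * do the full flush. See arch/x86/mm/tlb.c for the details.
         */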
        switch ((c->x86 << 8) + c->x86_model) {
        case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
        case 0x616: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
        case 0x617: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
        case 0x61d: /* six-core 45 nm xeon "Dunnington" */
                tlb_flushall_shift = -1;
                break;
        case 0x61a: /* 45 nm nehalem, "Bloomfield" */
        case 0x61e: /* 45 nm nehalem, "Lynnfield" */
        case 0x625: /* 32 nm nehalem, "Clarkdale" */
        case 0x62c: /* 32 nm nehalem, "Gulftown" */
        case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
        case 0x62f: /* 32 nm Xeon E7 */
                tlb_flushall_shift = 6;
                break;
        case 0x62a: /* SandyBridge */
        case 0x62d: /* SandyBridge, "Romley-EP" */
                tlb_flushall_shift = 5;
                break;
        case 0x63a: /* Ivybridge */
                tlb_flushall_shift = 1;
                break;
        default:
                tlb_flushall_shift = 6;
        }
}

static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c)
{
        int i, j, n;
        unsigned int regs[4];
        unsigned char *desc = (unsigned char *)regs;

        if (c->cpuid_level < 2)
                return;

        /* Number of times to iterate */
        n = cpuid_eax(2) & 0xFF;
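        /* In practice AL has been 1 on every CPU to date, but honour it anyway. */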

        for (i = 0 ; i < n ; i++) {
                cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

                /* If bit 31 is set, this is an unknown format */
                for (j = 0 ; j < 3 ; j++)
                        if (regs[j] & (1 << 31))
                                regs[j] = 0;

                /* Byte 0 is level count, not a descriptor */
                for (j = 1 ; j < 16 ; j++)
                        intel_tlb_lookup(desc[j]);
        }
        intel_tlb_flushall_shift_set(c);
}

static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
        .c_vendor       = "Intel",
        .c_ident        = { "GenuineIntel" },
#ifdef CONFIG_X86_32
        .c_models = {
                { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
                  {
                          [0] = "486 DX-25/33",
                          [1] = "486 DX-50",
                          [2] = "486 SX",
                          [3] = "486 DX/2",
                          [4] = "486 SL",
                          [5] = "486 SX/2",
                          [7] = "486 DX/2-WB",
                          [8] = "486 DX/4",
                          [9] = "486 DX/4-WB"
                  }
                },
                { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
                  {
                          [0] = "Pentium 60/66 A-step",
                          [1] = "Pentium 60/66",
                          [2] = "Pentium 75 - 200",
                          [3] = "OverDrive PODP5V83",
                          [4] = "Pentium MMX",
                          [7] = "Mobile Pentium 75 - 200",
                          [8] = "Mobile Pentium MMX"
                  }
                },
                { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
                  {
                          [0] = "Pentium Pro A-step",
                          [1] = "Pentium Pro",
                          [3] = "Pentium II (Klamath)",
                          [4] = "Pentium II (Deschutes)",
                          [5] = "Pentium II (Deschutes)",
                          [6] = "Mobile Pentium II",
                          [7] = "Pentium III (Katmai)",
                          [8] = "Pentium III (Coppermine)",
                          [10] = "Pentium III (Cascades)",
                          [11] = "Pentium III (Tualatin)",
                  }
                },
                { .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
                  {
                          [0] = "Pentium 4 (Unknown)",
                          [1] = "Pentium 4 (Willamette)",
                          [2] = "Pentium 4 (Northwood)",
                          [4] = "Pentium 4 (Foster)",
                          [5] = "Pentium 4 (Foster)",
                  }
                },
        },
        .c_size_cache   = intel_size_cache,
#endif
        .c_detect_tlb   = intel_detect_tlb,
        .c_early_init   = early_init_intel,
        .c_init         = init_intel,
        .c_x86_vendor   = X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);