linux/arch/x86/kernel/cpu/intel.c
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

static void early_init_intel(struct cpuinfo_x86 *c)
{
        u64 misc_enable;

        /* Unmask CPUID levels if masked: */
        if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
                rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

                if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
                        misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
                        wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
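                        /*
                         * Limit lifted: re-read the max CPUID level and
                         * refresh the cached feature bits.
                         */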
                        c->cpuid_level = cpuid_eax(0);
                        get_cpu_cap(c);
                }
        }

        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
                (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

        if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
                unsigned lower_word;

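                /*
                 * Per the SDM: write 0 to MSR_IA32_UCODE_REV, serialize,
                 * then read it back; the revision is in the high word.
                 */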
                wrmsr(MSR_IA32_UCODE_REV, 0, 0);
                /* Required by the SDM */
                sync_core();
                rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
        }

        /*
         * Atom erratum AAE44/AAF40/AAG38/AAH41:
         *
         * A race condition between speculative fetches and invalidating
         * a large page.  This is worked around in microcode, but we
         * need the microcode to have already been loaded... so if it is
         * not, recommend a BIOS update and disable large pages.
         */
        if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
            c->microcode < 0x20e) {
                printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
                clear_cpu_cap(c, X86_FEATURE_PSE);
        }

#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
        /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
        if (c->x86 == 15 && c->x86_cache_alignment == 64)
                c->x86_cache_alignment = 128;
#endif

        /* CPUID workaround for 0F33/0F34 CPU */
        if (c->x86 == 0xF && c->x86_model == 0x3
            && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
                c->x86_phys_bits = 36;

        /*
         * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
         * with P/T states and does not stop in deep C-states.
         *
         * It is also reliable across cores and sockets. (but not across
         * cabinets - we turn it off in that case explicitly.)
         */
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
                if (!check_tsc_unstable())
                        set_sched_clock_stable();
        }

        /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
        if (c->x86 == 6) {
                switch (c->x86_model) {
                case 0x27:      /* Penwell */
                case 0x35:      /* Cloverview */
                        set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
                        break;
                default:
                        break;
                }
        }

        /*
         * There is a known erratum on Pentium III and Core Solo
         * and Core Duo CPUs.
         * " Page with PAT set to WC while associated MTRR is UC
         *   may consolidate to UC "
         * Because of this erratum, it is better to stick with
         * setting WC in MTRR rather than using PAT on these CPUs.
         *
         * Enable PAT WC only on P4, Core 2 or later CPUs.
         */
        if (c->x86 == 6 && c->x86_model < 15)
                clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
        /*
         * P4s have a "fast strings" feature which causes single-
         * stepping REP instructions to only generate a #DB on
         * cache-line boundaries.
         *
         * Ingo Molnar reported a Pentium D (model 6) and a Xeon
         * (model 2) with the same problem.
         */
        if (c->x86 == 15) {
                rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

                if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
                        printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

                        misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
                        wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                }
        }
#endif

        /*
         * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
         * clear the fast string and enhanced fast string CPU capabilities.
         */
        if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
                rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
                        printk(KERN_INFO "Disabled fast string operations\n");
                        setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
                        setup_clear_cpu_cap(X86_FEATURE_ERMS);
                }
        }
}

#ifdef CONFIG_X86_32
/*
 *      Early probe support logic for ppro memory erratum #50
 *
 *      This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
        /* Uses data from early_cpu_detect now */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
            boot_cpu_data.x86_mask < 8) {
                printk(KERN_INFO "Pentium Pro with Erratum #50 detected. Taking evasive action.\n");
                return 1;
        }
        return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
        /* Are we being called from identify_secondary_cpu()? */
        if (!c->cpu_index)
                return;

        /*
         * Mask B, Pentium, but not Pentium MMX
         */
        if (c->x86 == 5 &&
            c->x86_mask >= 1 && c->x86_mask <= 4 &&
            c->x86_model <= 3) {
                /*
                 * Remember we have B step Pentia with bugs
                 */
                WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
                             "with B stepping processors.\n");
        }
}

static void intel_workarounds(struct cpuinfo_x86 *c)
{
        unsigned long lo, hi;

#ifdef CONFIG_X86_F00F_BUG
        /*
         * All current models of Pentium and Pentium with MMX technology CPUs
         * have the F0 0F bug, which lets nonprivileged users lock up the
         * system. Announce that the fault handler will be checking for it.
         */
        clear_cpu_bug(c, X86_BUG_F00F);
        if (!paravirt_enabled() && c->x86 == 5) {
                static int f00f_workaround_enabled;

                set_cpu_bug(c, X86_BUG_F00F);
                if (!f00f_workaround_enabled) {
                        printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
                        f00f_workaround_enabled = 1;
                }
        }
#endif

        /*
         * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
         * model 3 mask 3
         */
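        /* family<<8 | model<<4 | stepping: 0x633 is family 6, model 3, stepping 3 */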
        if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
                clear_cpu_cap(c, X86_FEATURE_SEP);

        /*
         * P4 Xeon errata 037 workaround.
         * Hardware prefetcher may cause stale data to be loaded into the cache.
         */
        if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
                rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
                if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
                        printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
                        printk(KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
                        lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
                        wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
                }
        }

        /*
         * See if we have a good local APIC by checking for buggy Pentia,
         * i.e. all B steppings and the C2 stepping of P54C when using their
         * integrated APIC (see 11AP erratum in "Pentium Processor
         * Specification Update").
         */
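        /*
         * 0x520 packs family 5, model 2 (P54C); per the check below, the
         * B steppings are masks below 0x6 and the C2 stepping is 0xb.
         */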
        if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
            (c->x86_mask < 0x6 || c->x86_mask == 0xb))
                set_cpu_cap(c, X86_FEATURE_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
        /*
         * Set up the preferred alignment for movsl bulk memory moves
         */
        switch (c->x86) {
        case 4:         /* 486: untested */
                break;
        case 5:         /* Old Pentia: untested */
                break;
        case 6:         /* PII/PIII only like movsl with 8-byte alignment */
                movsl_mask.mask = 7;
                break;
        case 15:        /* P4 is OK down to 8-byte alignment */
                movsl_mask.mask = 7;
                break;
        }
#endif

#ifdef CONFIG_X86_NUMAQ
        numaq_tsc_disable();
#endif

        intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();

        /*
         * Don't do the funky fallback heuristics the AMD version
         * employs for now.
         */
        node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE || !node_online(node)) {
                /* reuse the value from init_cpu_to_node() */
                node = cpu_to_node(cpu);
        }
        numa_set_node(cpu, node);
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, ebx, ecx, edx;

        if (c->cpuid_level < 4)
                return 1;

        /* Intel has a non-standard dependency on %ecx for this CPUID level. */
        cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
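        /*
         * EAX[4:0] is the cache type (0 means no more caches); EAX[31:26]
         * is the maximum core ID per package, minus one.
         */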
        if (eax & 0x1f)
                return (eax >> 26) + 1;
        else
                return 1;
}

static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
        /* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW    0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI          0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS      0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC    0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT          0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID         0x00000020

        u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

        clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
        clear_cpu_cap(c, X86_FEATURE_VNMI);
        clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
        clear_cpu_cap(c, X86_FEATURE_EPT);
        clear_cpu_cap(c, X86_FEATURE_VPID);

        rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
        msr_ctl = vmx_msr_high | vmx_msr_low;
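        /*
         * The low dword reports controls that are always on, the high
         * dword those that may be enabled; OR-ing the halves covers
         * every available control.
         */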
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
                set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
                set_cpu_cap(c, X86_FEATURE_VNMI);
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
                rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
                      vmx_msr_low, vmx_msr_high);
                msr_ctl2 = vmx_msr_high | vmx_msr_low;
                if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
                    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
                        set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
                        set_cpu_cap(c, X86_FEATURE_EPT);
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
                        set_cpu_cap(c, X86_FEATURE_VPID);
        }
}

static void init_intel(struct cpuinfo_x86 *c)
{
        unsigned int l2 = 0;

        early_init_intel(c);

        intel_workarounds(c);

        /*
         * Detect the extended topology information if available. This
         * will reinitialise the initial_apicid which will be used
         * in init_intel_cacheinfo()
         */
        detect_extended_topology(c);

        l2 = init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                /* Check for version and the number of counters */
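                /*
                 * CPUID.0xA: EAX[7:0] is the perfmon version and
                 * EAX[15:8] the number of general-purpose counters.
                 */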
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
        }

        if (cpu_has_xmm2)
                set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
        if (cpu_has_ds) {
                unsigned int l1;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
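                /* Bit 11 set = BTS unavailable, bit 12 set = PEBS unavailable */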
                if (!(l1 & (1<<11)))
                        set_cpu_cap(c, X86_FEATURE_BTS);
                if (!(l1 & (1<<12)))
                        set_cpu_cap(c, X86_FEATURE_PEBS);
        }

        if (c->x86 == 6 && cpu_has_clflush &&
            (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
                set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);

#ifdef CONFIG_X86_64
        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
        /*
         * Names for the Pentium II/Celeron processors
         * detectable only by also checking the cache size.
         * Dixon is NOT a Celeron.
         */
        if (c->x86 == 6) {
                char *p = NULL;

                switch (c->x86_model) {
                case 5:
                        if (l2 == 0)
                                p = "Celeron (Covington)";
                        else if (l2 == 256)
                                p = "Mobile Pentium II (Dixon)";
                        break;

                case 6:
                        if (l2 == 128)
                                p = "Celeron (Mendocino)";
                        else if (c->x86_mask == 0 || c->x86_mask == 5)
                                p = "Celeron-A";
                        break;

                case 8:
                        if (l2 == 128)
                                p = "Celeron (Coppermine)";
                        break;
                }

                if (p)
                        strcpy(c->x86_model_id, p);
        }

        if (c->x86 == 15)
                set_cpu_cap(c, X86_FEATURE_P4);
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_P3);
#endif

        if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
                /*
                 * Let's use the legacy cpuid leaves 0x1 and 0x4 for
                 * topology detection.
                 */
                c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
                detect_ht(c);
#endif
        }

        /* Work around errata */
        srat_detect_node(c);

        if (cpu_has(c, X86_FEATURE_VMX))
                detect_vmx_virtcap(c);

        /*
         * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not.
         * x86_energy_perf_policy(8) is available to change it at run-time
         */
        if (cpu_has(c, X86_FEATURE_EPB)) {
                u64 epb;

                rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
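                /*
                 * The bias is the low nibble: 0 asks for maximum
                 * performance, 15 for maximum power savings.
                 */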
                if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
                        printk_once(KERN_WARNING "ENERGY_PERF_BIAS:"
                                " Set to 'normal', was 'performance'\n"
                                "ENERGY_PERF_BIAS: View and update with"
                                " x86_energy_perf_policy(8)\n");
                        epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
                        wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
                }
        }
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /*
         * Intel PIII Tualatin. This comes in two flavours.
         * One has 256kb of cache, the other 512. We have no way
         * to determine which, so we use a boottime override
         * for the 512kb model, and assume 256 otherwise.
         */
        if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
                size = 256;
        return size;
}
#endif

#define TLB_INST_4K     0x01
#define TLB_INST_4M     0x02
#define TLB_INST_2M_4M  0x03

#define TLB_INST_ALL    0x05
#define TLB_INST_1G     0x06

#define TLB_DATA_4K     0x11
#define TLB_DATA_4M     0x12
#define TLB_DATA_2M_4M  0x13
#define TLB_DATA_4K_4M  0x14

#define TLB_DATA_1G     0x16

#define TLB_DATA0_4K    0x21
#define TLB_DATA0_4M    0x22
#define TLB_DATA0_2M_4M 0x23

#define STLB_4K         0x41
#define STLB_4K_2M      0x42

static const struct _tlb_table intel_tlb_table[] = {
        { 0x01, TLB_INST_4K,            32,     " TLB_INST 4 KByte pages, 4-way set associative" },
        { 0x02, TLB_INST_4M,            2,      " TLB_INST 4 MByte pages, fully associative" },
        { 0x03, TLB_DATA_4K,            64,     " TLB_DATA 4 KByte pages, 4-way set associative" },
        { 0x04, TLB_DATA_4M,            8,      " TLB_DATA 4 MByte pages, 4-way set associative" },
        { 0x05, TLB_DATA_4M,            32,     " TLB_DATA 4 MByte pages, 4-way set associative" },
        { 0x0b, TLB_INST_4M,            4,      " TLB_INST 4 MByte pages, 4-way set associative" },
        { 0x4f, TLB_INST_4K,            32,     " TLB_INST 4 KByte pages" },
        { 0x50, TLB_INST_ALL,           64,     " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x51, TLB_INST_ALL,           128,    " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x52, TLB_INST_ALL,           256,    " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x55, TLB_INST_2M_4M,         7,      " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
        { 0x56, TLB_DATA0_4M,           16,     " TLB_DATA0 4 MByte pages, 4-way set associative" },
        { 0x57, TLB_DATA0_4K,           16,     " TLB_DATA0 4 KByte pages, 4-way associative" },
        { 0x59, TLB_DATA0_4K,           16,     " TLB_DATA0 4 KByte pages, fully associative" },
        { 0x5a, TLB_DATA0_2M_4M,        32,     " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
        { 0x5b, TLB_DATA_4K_4M,         64,     " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0x5c, TLB_DATA_4K_4M,         128,    " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0x5d, TLB_DATA_4K_4M,         256,    " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0x61, TLB_INST_4K,            48,     " TLB_INST 4 KByte pages, fully associative" },
        { 0x63, TLB_DATA_1G,            4,      " TLB_DATA 1 GByte pages, 4-way set associative" },
        { 0x76, TLB_INST_2M_4M,         8,      " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
        { 0xb0, TLB_INST_4K,            128,    " TLB_INST 4 KByte pages, 4-way set associative" },
        { 0xb1, TLB_INST_2M_4M,         4,      " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way, 4 entries" },
        { 0xb2, TLB_INST_4K,            64,     " TLB_INST 4 KByte pages, 4-way set associative" },
        { 0xb3, TLB_DATA_4K,            128,    " TLB_DATA 4 KByte pages, 4-way set associative" },
        { 0xb4, TLB_DATA_4K,            256,    " TLB_DATA 4 KByte pages, 4-way associative" },
        { 0xb5, TLB_INST_4K,            64,     " TLB_INST 4 KByte pages, 8-way set associative" },
        { 0xb6, TLB_INST_4K,            128,    " TLB_INST 4 KByte pages, 8-way set associative" },
        { 0xba, TLB_DATA_4K,            64,     " TLB_DATA 4 KByte pages, 4-way associative" },
        { 0xc0, TLB_DATA_4K_4M,         8,      " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
        { 0xc1, STLB_4K_2M,             1024,   " STLB 4 KByte and 2 MByte pages, 8-way associative" },
        { 0xc2, TLB_DATA_2M_4M,         16,     " DTLB 2 MByte/4 MByte pages, 4-way associative" },
        { 0xca, STLB_4K,                512,    " STLB 4 KByte pages, 4-way associative" },
        { 0x00, 0, 0 }
};

static void intel_tlb_lookup(const unsigned char desc)
{
        unsigned char k;

        if (desc == 0)
                return;

        /* look up this descriptor in the table */
        for (k = 0; intel_tlb_table[k].descriptor != desc &&
                        intel_tlb_table[k].descriptor != 0; k++)
                ;

        if (intel_tlb_table[k].tlb_type == 0)
                return;

        switch (intel_tlb_table[k].tlb_type) {
        case STLB_4K:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case STLB_4K_2M:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_ALL:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_4K:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_4M:
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_2M_4M:
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4K:
        case TLB_DATA0_4K:
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4M:
        case TLB_DATA0_4M:
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_2M_4M:
        case TLB_DATA0_2M_4M:
                if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4K_4M:
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_1G:
                if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
                break;
        }
}

static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
{
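        /* Pack family and model: family 6, model 0x3a gives 0x63a (Ivybridge) */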
        switch ((c->x86 << 8) + c->x86_model) {
        case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
        case 0x616: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
        case 0x617: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
        case 0x61d: /* six-core 45 nm xeon "Dunnington" */
                tlb_flushall_shift = -1;
                break;
        case 0x63a: /* Ivybridge */
                tlb_flushall_shift = 2;
                break;
        case 0x61a: /* 45 nm nehalem, "Bloomfield" */
        case 0x61e: /* 45 nm nehalem, "Lynnfield" */
        case 0x625: /* 32 nm nehalem, "Clarkdale" */
        case 0x62c: /* 32 nm nehalem, "Gulftown" */
        case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
        case 0x62f: /* 32 nm Xeon E7 */
        case 0x62a: /* SandyBridge */
        case 0x62d: /* SandyBridge, "Romley-EP" */
        default:
                tlb_flushall_shift = 6;
        }
}

static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
        int i, j, n;
        unsigned int regs[4];
        unsigned char *desc = (unsigned char *)regs;
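        /* The four output registers double as an array of 16 descriptor bytes */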

        if (c->cpuid_level < 2)
                return;

        /* Number of times to iterate */
        n = cpuid_eax(2) & 0xFF;

        for (i = 0 ; i < n ; i++) {
                cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

                /* If bit 31 is set, this is an unknown format */
                for (j = 0 ; j < 3 ; j++)
                        if (regs[j] & (1 << 31))
                                regs[j] = 0;

                /* Byte 0 is level count, not a descriptor */
                for (j = 1 ; j < 16 ; j++)
                        intel_tlb_lookup(desc[j]);
        }
        intel_tlb_flushall_shift_set(c);
}

static const struct cpu_dev intel_cpu_dev = {
        .c_vendor       = "Intel",
        .c_ident        = { "GenuineIntel" },
#ifdef CONFIG_X86_32
        .legacy_models = {
                { .family = 4, .model_names =
                  {
                          [0] = "486 DX-25/33",
                          [1] = "486 DX-50",
                          [2] = "486 SX",
                          [3] = "486 DX/2",
                          [4] = "486 SL",
                          [5] = "486 SX/2",
                          [7] = "486 DX/2-WB",
                          [8] = "486 DX/4",
                          [9] = "486 DX/4-WB"
                  }
                },
                { .family = 5, .model_names =
                  {
                          [0] = "Pentium 60/66 A-step",
                          [1] = "Pentium 60/66",
                          [2] = "Pentium 75 - 200",
                          [3] = "OverDrive PODP5V83",
                          [4] = "Pentium MMX",
                          [7] = "Mobile Pentium 75 - 200",
                          [8] = "Mobile Pentium MMX"
                  }
                },
                { .family = 6, .model_names =
                  {
                          [0] = "Pentium Pro A-step",
                          [1] = "Pentium Pro",
                          [3] = "Pentium II (Klamath)",
                          [4] = "Pentium II (Deschutes)",
                          [5] = "Pentium II (Deschutes)",
                          [6] = "Mobile Pentium II",
                          [7] = "Pentium III (Katmai)",
                          [8] = "Pentium III (Coppermine)",
                          [10] = "Pentium III (Cascades)",
                          [11] = "Pentium III (Tualatin)",
                  }
                },
                { .family = 15, .model_names =
                  {
                          [0] = "Pentium 4 (Unknown)",
                          [1] = "Pentium 4 (Willamette)",
                          [2] = "Pentium 4 (Northwood)",
                          [4] = "Pentium 4 (Foster)",
                          [5] = "Pentium 4 (Foster)",
                  }
                },
        },
        .legacy_cache_size = intel_size_cache,
#endif
        .c_detect_tlb   = intel_detect_tlb,
        .c_early_init   = early_init_intel,
        .c_init         = init_intel,
        .c_x86_vendor   = X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);