linux/arch/x86/kernel/cpu/intel.c
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

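/*
 * Early Intel setup and errata handling; this runs on the boot CPU via
 * c_early_init and again for each CPU from init_intel(), before the
 * feature bits are finalized.
 */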
static void early_init_intel(struct cpuinfo_x86 *c)
{
        u64 misc_enable;

        /* Unmask CPUID levels if masked: */
        if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
                if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
                                  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
                        c->cpuid_level = cpuid_eax(0);
                        get_cpu_cap(c);
                }
        }

        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
                (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

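        /*
         * Read the running microcode revision using the SDM procedure:
         * write 0 to IA32_BIOS_SIGN_ID (MSR_IA32_UCODE_REV), execute a
         * serializing instruction (CPUID, via sync_core()), then read the
         * revision back from the high half (EDX) of the same MSR.
         */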
        if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
                unsigned lower_word;

                wrmsr(MSR_IA32_UCODE_REV, 0, 0);
                /* Required by the SDM */
                sync_core();
                rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
        }

        /*
         * Atom erratum AAE44/AAF40/AAG38/AAH41:
         *
         * A race condition between speculative fetches and invalidating
         * a large page.  This is worked around in microcode, but we
         * need the microcode to have already been loaded... so if it is
         * not, recommend a BIOS update and disable large pages.
         */
        if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
            c->microcode < 0x20e) {
                printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
                clear_cpu_cap(c, X86_FEATURE_PSE);
        }

#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
        /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
        if (c->x86 == 15 && c->x86_cache_alignment == 64)
                c->x86_cache_alignment = 128;
#endif

        /* CPUID workaround for 0F33/0F34 CPU */
        if (c->x86 == 0xF && c->x86_model == 0x3
            && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
                c->x86_phys_bits = 36;

        /*
         * c->x86_power is CPUID 8000_0007 EDX. Bit 8 set means the TSC
         * runs at a constant rate across P/T-state transitions and does
         * not stop in deep C-states.
         *
         * It is also reliable across cores and sockets (but not across
         * cabinets - we turn it off in that case explicitly).
         */
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
                if (!check_tsc_unstable())
                        set_sched_clock_stable();
        }

        /* Penwell and Cloverview have a TSC that doesn't stop in S3 */
        if (c->x86 == 6) {
                switch (c->x86_model) {
                case 0x27:      /* Penwell */
                case 0x35:      /* Cloverview */
                case 0x4a:      /* Merrifield */
                        set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
                        break;
                default:
                        break;
                }
        }

        /*
         * There is a known erratum on Pentium III and Core Solo
         * and Core Duo CPUs.
         * " Page with PAT set to WC while associated MTRR is UC
         *   may consolidate to UC "
         * Because of this erratum, it is better to stick with
         * setting WC in MTRR rather than using PAT on these CPUs.
         *
         * Enable PAT WC only on P4, Core 2 or later CPUs.
         */
        if (c->x86 == 6 && c->x86_model < 15)
                clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
        /*
         * P4s have a "fast strings" feature which causes single-
         * stepping REP instructions to only generate a #DB on
         * cache-line boundaries.
         *
         * Ingo Molnar reported a Pentium D (model 6) and a Xeon
         * (model 2) with the same problem.
         */
        if (c->x86 == 15)
                if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
                                  MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
                        pr_info("kmemcheck: Disabling fast string operations\n");
#endif

        /*
         * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
         * clear the fast string and enhanced fast string CPU capabilities.
         */
        if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
                rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
                        printk(KERN_INFO "Disabled fast string operations\n");
                        setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
                        setup_clear_cpu_cap(X86_FEATURE_ERMS);
                }
        }

        /*
         * Intel Quark Core DevMan_001.pdf section 6.4.11
         * "The operating system also is required to invalidate (i.e., flush)
         *  the TLB when any changes are made to any of the page table entries.
         *  The operating system must reload CR3 to cause the TLB to be flushed"
         *
         * As a result, cpu_has_pge() in arch/x86/include/asm/tlbflush.h should
         * be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
         * to be modified.
         */
        if (c->x86 == 5 && c->x86_model == 9) {
                pr_info("Disabling PGE capability bit\n");
                setup_clear_cpu_cap(X86_FEATURE_PGE);
        }
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
        /* Uses data from early_cpu_detect now */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
            boot_cpu_data.x86_mask < 8) {
                printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
                return 1;
        }
        return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
        /* Are we being called from identify_secondary_cpu()? */
        if (!c->cpu_index)
                return;

        /*
         * Mask B, Pentium, but not Pentium MMX
         */
        if (c->x86 == 5 &&
            c->x86_mask >= 1 && c->x86_mask <= 4 &&
            c->x86_model <= 3) {
                /*
                 * Remember we have B step Pentia with bugs
                 */
                WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
                                    "with B stepping processors.\n");
        }
}

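/*
 * "forcepae" kernel parameter: trust that PAE works even though CPUID
 * hides it (see the Pentium M workaround in intel_workarounds() below).
 */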
static int forcepae;
static int __init forcepae_setup(char *__unused)
{
        forcepae = 1;
        return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
        /*
         * All models of Pentium and Pentium with MMX technology CPUs
         * have the F0 0F bug, which lets nonprivileged users lock up the
         * system. Announce that the fault handler will be checking for it.
         * The Quark is also family 5, but does not have the same bug.
         */
        clear_cpu_bug(c, X86_BUG_F00F);
        if (!paravirt_enabled() && c->x86 == 5 && c->x86_model < 9) {
                static int f00f_workaround_enabled;

                set_cpu_bug(c, X86_BUG_F00F);
                if (!f00f_workaround_enabled) {
                        printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
                        f00f_workaround_enabled = 1;
                }
        }
#endif

        /*
         * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
         * model 3 mask 3 (0x633 below packs family<<8 | model<<4 | mask)
         */
        if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
                clear_cpu_cap(c, X86_FEATURE_SEP);

        /*
         * PAE CPUID issue: many Pentium M report no PAE but may have a
         * functionally usable PAE implementation.
         * Forcefully enable PAE if kernel parameter "forcepae" is present.
         */
        if (forcepae) {
                printk(KERN_WARNING "PAE forced!\n");
                set_cpu_cap(c, X86_FEATURE_PAE);
                add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
        }

        /*
         * P4 Xeon errata 037 workaround.
         * Hardware prefetcher may cause stale data to be loaded into the cache.
         */
        if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
                if (msr_set_bit(MSR_IA32_MISC_ENABLE,
                                MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
                    > 0) {
                        pr_info("CPU: C0 stepping P4 Xeon detected.\n");
                        pr_info("CPU: Disabling hardware prefetching (Errata 037)\n");
                }
        }

        /*
         * See if we have a good local APIC by checking for buggy Pentia,
         * i.e. all B steppings and the C2 stepping of P54C when using their
         * integrated APIC (see 11AP erratum in "Pentium Processor
         * Specification Update").
         */
        if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
            (c->x86_mask < 0x6 || c->x86_mask == 0xb))
                set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
        /*
         * Set up the preferred alignment for movsl bulk memory moves
         */
        switch (c->x86) {
        case 4:         /* 486: untested */
                break;
        case 5:         /* Old Pentia: untested */
                break;
        case 6:         /* PII/PIII only like movsl with 8-byte alignment */
                movsl_mask.mask = 7;
                break;
        case 15:        /* P4 is OK down to 8-byte alignment */
                movsl_mask.mask = 7;
                break;
        }
#endif

        intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();

        /*
         * Don't do the funky fallback heuristics the AMD version
         * employs for now.
         */
        node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE || !node_online(node)) {
                /* reuse the value from init_cpu_to_node() */
                node = cpu_to_node(cpu);
        }
        numa_set_node(cpu, node);
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, ebx, ecx, edx;

        if (c->cpuid_level < 4)
                return 1;

        /* Intel has a non-standard dependency on %ecx for this CPUID level. */
        cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
        /* EAX[4:0] is the cache type; 0 means no more caches to enumerate. */
        if (eax & 0x1f)
                /* EAX[31:26] is the maximum number of cores, minus one. */
                return (eax >> 26) + 1;
        else
                return 1;
}

static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
        /* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW    0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI          0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS      0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC    0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT          0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID         0x00000020

        u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

        clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
        clear_cpu_cap(c, X86_FEATURE_VNMI);
        clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
        clear_cpu_cap(c, X86_FEATURE_EPT);
        clear_cpu_cap(c, X86_FEATURE_VPID);

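        /*
         * Per the SDM's allowed-0/allowed-1 convention, the low half of a
         * VMX capability MSR reports controls that are fixed to 1 and the
         * high half controls that may be set to 1; OR-ing the two halves
         * yields the set of controls that can be enabled at all.
         */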
        rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
        msr_ctl = vmx_msr_high | vmx_msr_low;
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
                set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
                set_cpu_cap(c, X86_FEATURE_VNMI);
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
                rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
                      vmx_msr_low, vmx_msr_high);
                msr_ctl2 = vmx_msr_high | vmx_msr_low;
                if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
                    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
                        set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
                        set_cpu_cap(c, X86_FEATURE_EPT);
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
                        set_cpu_cap(c, X86_FEATURE_VPID);
        }
}

static void init_intel_energy_perf(struct cpuinfo_x86 *c)
{
        u64 epb;

        /*
         * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized.
         * (x86_energy_perf_policy(8) is available to change it at run-time.)
         */
        if (!cpu_has(c, X86_FEATURE_EPB))
                return;

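        /*
         * Bits 3:0 of MSR_IA32_ENERGY_PERF_BIAS hold the bias hint: 0 is
         * maximum performance, 15 maximum energy savings.
         */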
        rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
        if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
                return;

        pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
        pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
        epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
        wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
}

static void intel_bsp_resume(struct cpuinfo_x86 *c)
{
        /*
         * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume,
         * so reinitialize it properly like during bootup:
         */
        init_intel_energy_perf(c);
}


static void init_intel(struct cpuinfo_x86 *c)
{
        unsigned int l2 = 0;

        early_init_intel(c);

        intel_workarounds(c);

        /*
         * Detect the extended topology information if available. This
         * will reinitialise the initial_apicid which will be used
         * in init_intel_cacheinfo()
         */
        detect_extended_topology(c);

        if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
                /*
                 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
                 * detection.
                 */
                c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
                detect_ht(c);
#endif
        }

        l2 = init_intel_cacheinfo(c);

        /* Detect legacy cache sizes if init_intel_cacheinfo did not */
        if (l2 == 0) {
                cpu_detect_cache_sizes(c);
                l2 = c->x86_cache_size;
        }

        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                /*
                 * EAX[7:0] is the architectural perfmon version and
                 * EAX[15:8] the number of general-purpose counters.
                 */
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
        }

        if (cpu_has_xmm2)
                set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
        if (cpu_has_ds) {
                unsigned int l1;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                /* MISC_ENABLE bit 11 set means BTS is unavailable */
                if (!(l1 & (1<<11)))
                        set_cpu_cap(c, X86_FEATURE_BTS);
                /* MISC_ENABLE bit 12 set means PEBS is unavailable */
                if (!(l1 & (1<<12)))
                        set_cpu_cap(c, X86_FEATURE_PEBS);
        }

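        /*
         * These models need a CLFLUSH of the monitored cache line before
         * MONITOR to arm reliably (e.g. Xeon 7400 erratum AAI65), so flag
         * the bug for the mwait idle paths.
         */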
        if (c->x86 == 6 && cpu_has_clflush &&
            (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
                set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

#ifdef CONFIG_X86_64
        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
        /*
         * Names for the Pentium II/Celeron processors
         * detectable only by also checking the cache size.
         * Dixon is NOT a Celeron.
         */
        if (c->x86 == 6) {
                char *p = NULL;

                switch (c->x86_model) {
                case 5:
                        if (l2 == 0)
                                p = "Celeron (Covington)";
                        else if (l2 == 256)
                                p = "Mobile Pentium II (Dixon)";
                        break;

                case 6:
                        if (l2 == 128)
                                p = "Celeron (Mendocino)";
                        else if (c->x86_mask == 0 || c->x86_mask == 5)
                                p = "Celeron-A";
                        break;

                case 8:
                        if (l2 == 128)
                                p = "Celeron (Coppermine)";
                        break;
                }

                if (p)
                        strcpy(c->x86_model_id, p);
        }

        if (c->x86 == 15)
                set_cpu_cap(c, X86_FEATURE_P4);
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_P3);
#endif

        /* Work around errata */
        srat_detect_node(c);

        if (cpu_has(c, X86_FEATURE_VMX))
                detect_vmx_virtcap(c);

        init_intel_energy_perf(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /*
         * Intel PIII Tualatin. This comes in two flavours.
         * One has 256 KB of cache, the other 512 KB. We have no way
         * to determine which, so we use a boottime override
         * for the 512 KB model, and assume 256 otherwise.
         */
        if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
                size = 256;

        /*
         * Intel Quark SoC X1000 contains a 4-way set associative
         * 16K cache with a 16 byte cache line and 256 lines per tag
         */
        if ((c->x86 == 5) && (c->x86_model == 9))
                size = 16;
        return size;
}
#endif

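/*
 * Encoded TLB classes: the high nibble selects the TLB (0x0x instruction,
 * 0x1x data, 0x2x DTLB0, 0x4x shared second-level STLB), the low nibble
 * the page sizes covered.
 */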
#define TLB_INST_4K     0x01
#define TLB_INST_4M     0x02
#define TLB_INST_2M_4M  0x03

#define TLB_INST_ALL    0x05
#define TLB_INST_1G     0x06

#define TLB_DATA_4K     0x11
#define TLB_DATA_4M     0x12
#define TLB_DATA_2M_4M  0x13
#define TLB_DATA_4K_4M  0x14

#define TLB_DATA_1G     0x16

#define TLB_DATA0_4K    0x21
#define TLB_DATA0_4M    0x22
#define TLB_DATA0_2M_4M 0x23

#define STLB_4K         0x41
#define STLB_4K_2M      0x42

static const struct _tlb_table intel_tlb_table[] = {
        { 0x01, TLB_INST_4K,            32,     " TLB_INST 4 KByte pages, 4-way set associative" },
        { 0x02, TLB_INST_4M,            2,      " TLB_INST 4 MByte pages, fully associative" },
        { 0x03, TLB_DATA_4K,            64,     " TLB_DATA 4 KByte pages, 4-way set associative" },
        { 0x04, TLB_DATA_4M,            8,      " TLB_DATA 4 MByte pages, 4-way set associative" },
        { 0x05, TLB_DATA_4M,            32,     " TLB_DATA 4 MByte pages, 4-way set associative" },
        { 0x0b, TLB_INST_4M,            4,      " TLB_INST 4 MByte pages, 4-way set associative" },
        { 0x4f, TLB_INST_4K,            32,     " TLB_INST 4 KByte pages" },
        { 0x50, TLB_INST_ALL,           64,     " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x51, TLB_INST_ALL,           128,    " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x52, TLB_INST_ALL,           256,    " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x55, TLB_INST_2M_4M,         7,      " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
        { 0x56, TLB_DATA0_4M,           16,     " TLB_DATA0 4 MByte pages, 4-way set associative" },
        { 0x57, TLB_DATA0_4K,           16,     " TLB_DATA0 4 KByte pages, 4-way associative" },
        { 0x59, TLB_DATA0_4K,           16,     " TLB_DATA0 4 KByte pages, fully associative" },
        { 0x5a, TLB_DATA0_2M_4M,        32,     " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
        { 0x5b, TLB_DATA_4K_4M,         64,     " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0x5c, TLB_DATA_4K_4M,         128,    " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0x5d, TLB_DATA_4K_4M,         256,    " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0x61, TLB_INST_4K,            48,     " TLB_INST 4 KByte pages, fully associative" },
        { 0x63, TLB_DATA_1G,            4,      " TLB_DATA 1 GByte pages, 4-way set associative" },
        { 0x76, TLB_INST_2M_4M,         8,      " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
        { 0xb0, TLB_INST_4K,            128,    " TLB_INST 4 KByte pages, 4-way set associative" },
        { 0xb1, TLB_INST_2M_4M,         4,      " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
        { 0xb2, TLB_INST_4K,            64,     " TLB_INST 4KByte pages, 4-way set associative" },
        { 0xb3, TLB_DATA_4K,            128,    " TLB_DATA 4 KByte pages, 4-way set associative" },
        { 0xb4, TLB_DATA_4K,            256,    " TLB_DATA 4 KByte pages, 4-way associative" },
        { 0xb5, TLB_INST_4K,            64,     " TLB_INST 4 KByte pages, 8-way set associative" },
        { 0xb6, TLB_INST_4K,            128,    " TLB_INST 4 KByte pages, 8-way set associative" },
        { 0xba, TLB_DATA_4K,            64,     " TLB_DATA 4 KByte pages, 4-way associative" },
        { 0xc0, TLB_DATA_4K_4M,         8,      " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
        { 0xc1, STLB_4K_2M,             1024,   " STLB 4 KByte and 2 MByte pages, 8-way associative" },
        { 0xc2, TLB_DATA_2M_4M,         16,     " DTLB 2 MByte/4MByte pages, 4-way associative" },
        { 0xca, STLB_4K,                512,    " STLB 4 KByte pages, 4-way associative" },
        { 0x00, 0, 0 }
};

static void intel_tlb_lookup(const unsigned char desc)
{
        unsigned char k;

        if (desc == 0)
                return;

        /* look up this descriptor in the table */
        for (k = 0; intel_tlb_table[k].descriptor != desc &&
                    intel_tlb_table[k].descriptor != 0; k++)
                ;

        if (intel_tlb_table[k].tlb_type == 0)
                return;

        switch (intel_tlb_table[k].tlb_type) {
        case STLB_4K:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case STLB_4K_2M:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_ALL:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_4K:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_4M:
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_2M_4M:
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4K:
        case TLB_DATA0_4K:
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4M:
        case TLB_DATA0_4M:
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_2M_4M:
        case TLB_DATA0_2M_4M:
                if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_1G:
                if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
                break;
        }
}

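/*
 * CPUID leaf 2 packs one descriptor per byte of EAX..EDX.  The low byte
 * of EAX is the number of times the leaf must be queried, and a register
 * with bit 31 set holds no valid descriptors.
 */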
static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
        int i, j, n;
        unsigned int regs[4];
        unsigned char *desc = (unsigned char *)regs;

        if (c->cpuid_level < 2)
                return;

        /* Number of times to iterate */
        n = cpuid_eax(2) & 0xFF;

        for (i = 0 ; i < n ; i++) {
                cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

                /*
                 * If bit 31 is set, the register holds no valid
                 * descriptors; the SDM defines this bit for all four
                 * registers, EAX through EDX.
                 */
                for (j = 0 ; j < 4 ; j++)
                        if (regs[j] & (1 << 31))
                                regs[j] = 0;

                /* Byte 0 of EAX is the repeat count, not a descriptor */
                for (j = 1 ; j < 16 ; j++)
                        intel_tlb_lookup(desc[j]);
        }
}

static const struct cpu_dev intel_cpu_dev = {
        .c_vendor       = "Intel",
        .c_ident        = { "GenuineIntel" },
#ifdef CONFIG_X86_32
        .legacy_models = {
                { .family = 4, .model_names =
                  {
                          [0] = "486 DX-25/33",
                          [1] = "486 DX-50",
                          [2] = "486 SX",
                          [3] = "486 DX/2",
                          [4] = "486 SL",
                          [5] = "486 SX/2",
                          [7] = "486 DX/2-WB",
                          [8] = "486 DX/4",
                          [9] = "486 DX/4-WB"
                  }
                },
                { .family = 5, .model_names =
                  {
                          [0] = "Pentium 60/66 A-step",
                          [1] = "Pentium 60/66",
                          [2] = "Pentium 75 - 200",
                          [3] = "OverDrive PODP5V83",
                          [4] = "Pentium MMX",
                          [7] = "Mobile Pentium 75 - 200",
                          [8] = "Mobile Pentium MMX",
                          [9] = "Quark SoC X1000",
                  }
                },
                { .family = 6, .model_names =
                  {
                          [0] = "Pentium Pro A-step",
                          [1] = "Pentium Pro",
                          [3] = "Pentium II (Klamath)",
                          [4] = "Pentium II (Deschutes)",
                          [5] = "Pentium II (Deschutes)",
                          [6] = "Mobile Pentium II",
                          [7] = "Pentium III (Katmai)",
                          [8] = "Pentium III (Coppermine)",
                          [10] = "Pentium III (Cascades)",
                          [11] = "Pentium III (Tualatin)",
                  }
                },
                { .family = 15, .model_names =
                  {
                          [0] = "Pentium 4 (Unknown)",
                          [1] = "Pentium 4 (Willamette)",
                          [2] = "Pentium 4 (Northwood)",
                          [4] = "Pentium 4 (Foster)",
                          [5] = "Pentium 4 (Foster)",
                  }
                },
        },
        .legacy_cache_size = intel_size_cache,
#endif
        .c_detect_tlb   = intel_detect_tlb,
        .c_early_init   = early_init_intel,
        .c_init         = init_intel,
        .c_bsp_resume   = intel_bsp_resume,
        .c_x86_vendor   = X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);