1#include <linux/kernel.h>
2
3#include <linux/string.h>
4#include <linux/bitops.h>
5#include <linux/smp.h>
6#include <linux/sched.h>
7#include <linux/thread_info.h>
8#include <linux/module.h>
9#include <linux/uaccess.h>
10
11#include <asm/processor.h>
12#include <asm/pgtable.h>
13#include <asm/msr.h>
14#include <asm/bugs.h>
15#include <asm/cpu.h>
16#include <asm/intel-family.h>
17#include <asm/hwcap2.h>
18#include <asm/elf.h>
19
20#ifdef CONFIG_X86_64
21#include <linux/topology.h>
22#endif
23
24#include "cpu.h"
25
26#ifdef CONFIG_X86_LOCAL_APIC
27#include <asm/mpspec.h>
28#include <asm/apic.h>
29#endif
30
31
32
33
34
/* Non-zero when the admin asked to skip the SKD046 MPX workaround below. */
static int forcempx;

/*
 * Boot parameter "intel-skd-046-workaround=disable": force
 * check_mpx_erratum() to leave MPX enabled even when SMEP is absent.
 */
static int __init forcempx_setup(char *__unused)
{
	forcempx = 1;

	return 1;
}
__setup("intel-skd-046-workaround=disable", forcempx_setup);
44
void check_mpx_erratum(struct cpuinfo_x86 *c)
{
	/* Honor the "intel-skd-046-workaround=disable" override. */
	if (forcempx)
		return;
	/*
	 * Turn off the MPX feature on CPUs where SMEP is not
	 * available or disabled.
	 *
	 * Works around Intel Erratum SKD046 (name taken from the boot
	 * parameter above); NOTE(review): this may also disable MPX on
	 * CPUs that legitimately lack SMEP -- confirm against the
	 * erratum text.
	 */
	if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
		setup_clear_cpu_cap(X86_FEATURE_MPX);
		pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
	}
}
65
/* Set by the "ring3mwait=disable" boot parameter; read in probe below. */
static bool ring3mwait_disabled __read_mostly;

/* Boot parameter handler: opt out of ring 3 MONITOR/MWAIT support. */
static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 0;
}
__setup("ring3mwait=disable", ring3mwait_disable);
74
static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * Ring 3 MONITOR/MWAIT is only handled here for the Xeon Phi
	 * family/model pairs below; there is no CPUID feature bit for
	 * it, so detection is done by model number.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_model) {
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	/* Honor the "ring3mwait=disable" boot parameter. */
	if (ring3mwait_disabled) {
		msr_clear_bit(MSR_MISC_FEATURE_ENABLES,
			      MSR_MISC_FEATURE_ENABLES_RING3MWAIT_BIT);
		return;
	}

	/* Enable the feature in the MSR and advertise the capability. */
	msr_set_bit(MSR_MISC_FEATURE_ENABLES,
		    MSR_MISC_FEATURE_ENABLES_RING3MWAIT_BIT);

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);

	/* Only the boot CPU publishes the ELF HWCAP2 bit to user space. */
	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}
105
/*
 * Early CPU setup for Intel: unmask CPUID levels, read the microcode
 * revision, and apply model-specific feature fixups/errata workarounds.
 * Runs before full feature detection; also called via c_early_init.
 */
static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked by the BIOS: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
			/* Re-read the now-unmasked CPUID space. */
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
		(c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	/* Read the installed microcode revision (family >= 6, not IA64). */
	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
		unsigned lower_word;

		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
		/* The CPUID/serializing instruction latches the revision. */
		sync_core();
		rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
	}

	/*
	 * Atom (model 0x1c, early steppings) PSE erratum: only fixed in
	 * microcode >= 0x20e, so without that microcode we must not use
	 * large pages.  Warn and clear PSE.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
	    c->microcode < 0x20e) {
		printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 byte clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is CPUID 8000_0007 EDX.  Bit 8 means the TSC runs
	 * at a constant rate across P/T states and deep C-states, so it
	 * can back the scheduler clock.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			set_sched_clock_stable();
	}

	/* Models whose TSC keeps running across S3 suspend. */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 0x27:	/* presumably Penwell -- TODO confirm */
		case 0x35:	/* presumably Cloverview -- TODO confirm */
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}

	/*
	 * Clear PAT on family 6 models before Core 2 (model 15);
	 * NOTE(review): this looks like the PIII/Core Solo/Duo PAT
	 * erratum workaround -- confirm against the specification
	 * update before relying on this comment.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * kmemcheck needs fast string operations disabled on family 15;
	 * the MSR bit is cleared so single-stepped REP instructions
	 * behave as kmemcheck expects.
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		}
	}
#endif

	/*
	 * If fast string operations are not enabled in IA32_MISC_ENABLE
	 * for any reason, clear the fast string (REP_GOOD) and enhanced
	 * fast string (ERMS) CPU capabilities globally.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			printk(KERN_INFO "Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	if (c->cpuid_level >= 0x00000001) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);

		/*
		 * If HTT (EDX[28]) is set, EBX[23:16] holds the number of
		 * APIC IDs reserved per package; store the resulting
		 * shift value for the topology code.
		 */
		if (edx & (1U << 28))
			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
	}

	check_mpx_erratum(c);
}
250
251#ifdef CONFIG_X86_32
252
253
254
255
256
257
258int ppro_with_ram_bug(void)
259{
260
261 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
262 boot_cpu_data.x86 == 6 &&
263 boot_cpu_data.x86_model == 1 &&
264 boot_cpu_data.x86_mask < 8) {
265 printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
266 return 1;
267 }
268 return 0;
269}
270
271static void intel_smp_check(struct cpuinfo_x86 *c)
272{
273
274 if (!c->cpu_index)
275 return;
276
277
278
279
280 if (c->x86 == 5 &&
281 c->x86_mask >= 1 && c->x86_mask <= 4 &&
282 c->x86_model <= 3) {
283
284
285
286 WARN_ONCE(1, "WARNING: SMP operation may be unreliable"
287 "with B stepping processors.\n");
288 }
289}
290
/* Apply 32-bit-only Intel errata workarounds and tuning. */
static void intel_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All family 5 (Pentium-class) CPUs get the F0 0F bug flag set
	 * unless running paravirtualized; the fault handler checks the
	 * bug bit.  The message is printed only once.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: clear SEP on anything older than family 6,
	 * model 3, stepping 3 (encoded as 0x633 below).
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * P4 Xeon Errata 037 workaround: disable the hardware
	 * prefetcher on C0-stepping parts via IA32_MISC_ENABLE.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
			printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}

	/*
	 * Flag buggy local APICs on early Pentia: family/model 0x52x
	 * with stepping < 6 or stepping 0xb (11AP erratum, "Pentium
	 * Processor Specification Update").
	 */
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_cap(c, X86_FEATURE_11AP);


#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves.
	 */
	switch (c->x86) {
	case 4:		/* 486: left at default */
		break;
	case 5:		/* old Pentia: left at default */
		break;
	case 6:		/* PII/PIII: 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4: 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

#ifdef CONFIG_X86_NUMAQ
	numaq_tsc_disable();
#endif

	intel_smp_check(c);
}
369#else
/* 64-bit build: none of the 32-bit errata above apply. */
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
373#endif
374
/* Bind the current CPU to its NUMA node (no-op when !CONFIG_NUMA). */
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/* Prefer the firmware-reported node for this CPU. */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* Fall back to the value from early cpu-to-node setup. */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}
391
392
393
394
395static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
396{
397 unsigned int eax, ebx, ecx, edx;
398
399 if (c->cpuid_level < 4)
400 return 1;
401
402
403 cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
404 if (eax & 0x1f)
405 return (eax >> 26) + 1;
406 else
407 return 1;
408}
409
/*
 * Probe the VMX capability MSRs and translate the "allowed-1" control
 * bits into synthetic CPU feature flags (TPR shadow, virtual NMI,
 * FlexPriority, EPT, VPID).
 */
static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Bit positions inside the VMX procbased control MSRs: */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	/* Start from a clean slate; only set what the MSRs advertise. */
	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		/* Secondary controls exist; probe them too. */
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		/* FlexPriority = virtual APIC accesses + TPR shadow. */
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}
447
448static void init_intel(struct cpuinfo_x86 *c)
449{
450 unsigned int l2 = 0;
451
452 early_init_intel(c);
453
454 intel_workarounds(c);
455
456
457
458
459
460
461 detect_extended_topology(c);
462
463 l2 = init_intel_cacheinfo(c);
464 if (c->cpuid_level > 9) {
465 unsigned eax = cpuid_eax(10);
466
467 if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
468 set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
469 }
470
471 if (cpu_has_xmm2)
472 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
473 if (cpu_has_ds) {
474 unsigned int l1;
475 rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
476 if (!(l1 & (1<<11)))
477 set_cpu_cap(c, X86_FEATURE_BTS);
478 if (!(l1 & (1<<12)))
479 set_cpu_cap(c, X86_FEATURE_PEBS);
480 }
481
482 if (c->x86 == 6 && cpu_has_clflush &&
483 (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
484 set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
485
486#ifdef CONFIG_X86_64
487 if (c->x86 == 15)
488 c->x86_cache_alignment = c->x86_clflush_size * 2;
489 if (c->x86 == 6)
490 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
491#else
492
493
494
495
496
497 if (c->x86 == 6) {
498 char *p = NULL;
499
500 switch (c->x86_model) {
501 case 5:
502 if (l2 == 0)
503 p = "Celeron (Covington)";
504 else if (l2 == 256)
505 p = "Mobile Pentium II (Dixon)";
506 break;
507
508 case 6:
509 if (l2 == 128)
510 p = "Celeron (Mendocino)";
511 else if (c->x86_mask == 0 || c->x86_mask == 5)
512 p = "Celeron-A";
513 break;
514
515 case 8:
516 if (l2 == 128)
517 p = "Celeron (Coppermine)";
518 break;
519 }
520
521 if (p)
522 strcpy(c->x86_model_id, p);
523 }
524
525 if (c->x86 == 15)
526 set_cpu_cap(c, X86_FEATURE_P4);
527 if (c->x86 == 6)
528 set_cpu_cap(c, X86_FEATURE_P3);
529#endif
530
531 if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
532
533
534
535
536 c->x86_max_cores = intel_num_cpu_cores(c);
537#ifdef CONFIG_X86_32
538 detect_ht(c);
539#endif
540 }
541
542
543 srat_detect_node(c);
544
545 if (cpu_has(c, X86_FEATURE_VMX))
546 detect_vmx_virtcap(c);
547
548
549
550
551
552 if (cpu_has(c, X86_FEATURE_EPB)) {
553 u64 epb;
554
555 rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
556 if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
557 pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
558 pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
559 epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
560 wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
561 }
562 }
563
564 probe_xeon_phi_r3mwait(c);
565}
566
567#ifdef CONFIG_X86_32
568static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
569{
570
571
572
573
574
575
576 if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
577 size = 256;
578 return size;
579}
580#endif
581
/*
 * Internal TLB type codes used by intel_tlb_lookup().  Judging by the
 * encoding: low nibble selects the page size, high nibble selects the
 * TLB kind (0x0x instruction, 0x1x data, 0x2x data level 0, 0x4x
 * shared second-level).
 */
#define TLB_INST_4K	0x01
#define TLB_INST_4M	0x02
#define TLB_INST_2M_4M	0x03

#define TLB_INST_ALL	0x05
#define TLB_INST_1G	0x06

#define TLB_DATA_4K	0x11
#define TLB_DATA_4M	0x12
#define TLB_DATA_2M_4M	0x13
#define TLB_DATA_4K_4M	0x14

#define TLB_DATA_1G	0x16

#define TLB_DATA0_4K	0x21
#define TLB_DATA0_4M	0x22
#define TLB_DATA0_2M_4M	0x23

#define STLB_4K		0x41
601
/*
 * Mapping from CPUID leaf 2 cache/TLB descriptor bytes to TLB type and
 * entry count.  Terminated by the all-zero sentinel entry.
 */
static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, full associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages */" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};
631
/*
 * Translate one CPUID leaf 2 descriptor byte into updates of the
 * global per-page-size TLB entry counts (tlb_lli_* for instruction,
 * tlb_lld_* for data).  Each counter only ever grows: a descriptor
 * raises it to its entry count if larger.  Shared and combined
 * descriptors update every page size / TLB they cover.
 */
static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;
	/* Descriptor 0 is the "null" descriptor. */
	if (desc == 0)
		return;

	/* Linear scan of the table; stops at the 0x00 sentinel. */
	for (k = 0; intel_tlb_table[k].descriptor != desc && \
			intel_tlb_table[k].descriptor != 0; k++)
		;

	/* Unknown descriptor: nothing to record. */
	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		/* Shared second-level TLB counts for both I and D sides. */
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		/* One instruction TLB covering 4K, 2M and 4M pages. */
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		/* Level-0 data TLBs are folded into the same counters. */
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}
700
701static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
702{
703 switch ((c->x86 << 8) + c->x86_model) {
704 case 0x60f:
705 case 0x616:
706 case 0x617:
707 case 0x61d:
708 tlb_flushall_shift = -1;
709 break;
710 case 0x63a:
711 tlb_flushall_shift = 2;
712 break;
713 case 0x61a:
714 case 0x61e:
715 case 0x625:
716 case 0x62c:
717 case 0x62e:
718 case 0x62f:
719 case 0x62a:
720 case 0x62d:
721 default:
722 tlb_flushall_shift = 6;
723 }
724}
725
726static void intel_detect_tlb(struct cpuinfo_x86 *c)
727{
728 int i, j, n;
729 unsigned int regs[4];
730 unsigned char *desc = (unsigned char *)regs;
731
732 if (c->cpuid_level < 2)
733 return;
734
735
736 n = cpuid_eax(2) & 0xFF;
737
738 for (i = 0 ; i < n ; i++) {
739 cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]);
740
741
742 for (j = 0 ; j < 3 ; j++)
743 if (regs[j] & (1 << 31))
744 regs[j] = 0;
745
746
747 for (j = 1 ; j < 16 ; j++)
748 intel_tlb_lookup(desc[j]);
749 }
750 intel_tlb_flushall_shift_set(c);
751}
752
/*
 * cpu_dev descriptor for Intel: vendor identification string, legacy
 * 32-bit model-name tables (indexed by model number within each
 * family), and the init/TLB-detection callbacks defined above.
 */
static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_size_cache	= intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init   = early_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

/* Register this descriptor with the generic x86 CPU setup code. */
cpu_dev_register(intel_cpu_dev);
815
816