#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/pci-direct.h>

#ifdef CONFIG_X86_64
# include <asm/numa_64.h>
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_32
/*
 * B-step AMD K6 CPUs before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://membres.lycos.fr/poulot/k6bug.html for more information.
 *
 * AMD neglected to bump the chip revision when fixing the bug, but the
 * fixed parts also got faster indirect calls, so init_amd_k6() below uses
 * a timed loop of calls to vide() to tell the two apart.
 */

extern void vide(void);
__asm__(".align 4\nvide: ret");

static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
{
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround: remove the unneeded alias.
 */
#define CBAR		(0xfffc)	/* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
}
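/*
 * K6 family (family 5) setup: warn about the B-stepping erratum described
 * above and enable write allocation through the WHCR MSR on models that
 * support it.
 */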
static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = num_physpages >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		unsigned long d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * The fixed parts also improved indirect call performance,
		 * so a timed loop of indirect calls distinguishes fixed
		 * (fast) chips from buggy (slow) ones.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		rdtscl(d);
		while (n--)
			f_vide();
		rdtscl(d2);
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			printk(KERN_CONT
				"system stability may be impaired when more than 32 MB are used.\n");
		else
			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
	}

	/* K6 with old style WHCR write allocation */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips, with the new style WHCR layout */

		/* We can only write allocate on the low 4092Mb */
		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder, nothing to do here */
		return;
	}
}

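/*
 * Check whether this K7 (Athlon/Duron) part is certified for SMP operation;
 * if not, warn once and taint the kernel with TAINT_UNSAFE_SMP.
 */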
static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	/* Only check secondary CPUs; the boot CPU is never tainted here */
	if (c->cpu_index == boot_cpu_id)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		goto valid_k7;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		goto valid_k7;

	/*
	 * Athlon 662, Duron 671, and Athlons above model 7 have the MP
	 * capability bit, so trust it on those parts.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has_mp)
			goto valid_k7;

	/*
	 * If we get here, this is not a certified SMP capable AMD system:
	 * warn once and taint the kernel.
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	if (!test_taint(TAINT_UNSAFE_SMP))
		add_taint(TAINT_UNSAFE_SMP);

valid_k7:
	;
#endif
}

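/*
 * K7 (Athlon/Duron, family 6) setup: re-enable SSE where the BIOS left it
 * disabled, reprogram CLK_CTL on affected models and run the SMP check.
 */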
static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
{
	u32 l, h;

	/*
	 * Bit 15 of the Athlon specific MSR_K7_HWCR needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			rdmsr(MSR_K7_HWCR, l, h);
			l &= ~0x00008000;
			wrmsr(MSR_K7_HWCR, l, h);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO
			    "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	amd_k7_smp_check(c);
}
#endif

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
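/*
 * Pick an online node close to the given APIC id by scanning the
 * apicid_to_node table downwards and then upwards; fall back to the
 * first online node if nothing suitable is found.
 */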
static int __cpuinit nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map);
}
#endif

/*
 * Fixup core topology information for AMD multi-node (dual-node)
 * processors.
 * Assumption 1: the number of cores in each internal node is the same.
 * Assumption 2: mixed systems with both single-node and dual-node
 *               processors are not supported.
 */
#ifdef CONFIG_X86_HT
static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_PCI
	u32 t, cpn;
	u8 n, n_id;
	int cpu = smp_processor_id();

	/* fixup topology information only once per core */
	if (cpu_has(c, X86_FEATURE_AMD_DCM))
		return;

	/* check for a multi-node processor on the boot cpu's northbridge */
	t = read_pci_config(0, 24, 3, 0xe8);
	if (!(t & (1 << 29)))
		return;

	set_cpu_cap(c, X86_FEATURE_AMD_DCM);

	/* cores per node: each internal node has half the cores */
	cpn = c->x86_max_cores >> 1;

	/* even-numbered northbridge ids belong to node 0, odd to node 1 */
	n = c->phys_proc_id << 1;

	/*
	 * determine the internal node id from the northbridge and assign
	 * the cores fifty-fifty to the two internal nodes
	 */
	t = read_pci_config(0, 24 + n, 3, 0xe8);
	n = (t>>30) & 0x3;
	if (n == 0) {
		if (c->cpu_core_id < cpn)
			n_id = 0;
		else
			n_id = 1;
	} else {
		if (c->cpu_core_id < cpn)
			n_id = 1;
		else
			n_id = 0;
	}

	/* compute the entire NodeID and use it as the last level cache id */
	per_cpu(cpu_llc_id, cpu) = (c->phys_proc_id << 1) + n_id;

	/* fixup the core id to be in the range 0 to cpn-1 */
	c->cpu_core_id = c->cpu_core_id % cpn;
#endif
}
#endif

/*
 * On AMD multi-core setups the lower bits of the APIC id distinguish the
 * cores.  Assumes the number of cores is a power of two.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	/* fixup topology information on multi-node processors */
	if ((c->x86 == 0x10) && (c->x86_model == 9))
		amd_fixup_dcm(c);
#endif
}

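/*
 * Return the northbridge (node) id this CPU belongs to: with CONFIG_SMP it
 * is the per-cpu last level cache id set up above, otherwise 0.
 */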
int amd_get_nb_id(int cpu)
{
	int id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

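/*
 * Bind this CPU to a NUMA node, preferring the SRAT-derived apicid_to_node
 * mapping and falling back to heuristics when the firmware tables are
 * broken or incomplete.
 */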
static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = per_cpu(cpu_llc_id, cpu);

	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs.
		 *   Assume they are all increased by a constant offset, but
		 *   in the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 */
		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];

		/* Pick a nearby node as a last resort */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
#endif
}

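/*
 * Use CPUID leaf 0x80000008 to work out how many cores the package has and
 * how many APIC id bits are used for the core id.
 */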
static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute it from the core count */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

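/*
 * Early AMD setup, run before the generic identification code: detect core
 * topology, constant/non-stop TSC, MTRR-style features and the extended
 * APIC id.
 */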
static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 means the TSC runs at a
	 * constant rate with P/T states and does not stop in deep C-states.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/* check CPU config space for extended APIC ID */
	if (cpu_has_apic && c->x86 >= 0xf) {
		unsigned int val;
		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif
}

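/*
 * Main AMD CPU setup: apply family-specific errata workarounds, fix up
 * feature flags and detect core/cache/NUMA topology.
 */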
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned long long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8,
	 * bit 6 of MSR C001_0015.
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 0xf) {
		rdmsrl(MSR_K7_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K7_HWCR, value);
	}
#endif

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
	 * 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway.
	 */
	clear_cpu_cap(c, 0*32+31);

#ifdef CONFIG_X86_64
	/* On C+ stepping K8 rep microcode works well for copy/memset */
	if (c->x86 == 0xf) {
		u32 level;

		level = cpuid_eax(1);
		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
			set_cpu_cap(c, X86_FEATURE_REP_GOOD);

		/*
		 * Some BIOSes incorrectly force LAHF_LM, but only K8
		 * revision D (model >= 0x14) and later actually support it.
		 */
		if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
			u64 val;

			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
			if (!rdmsrl_amd_safe(0xc001100d, &val)) {
				val &= ~(1ULL << 32);
				wrmsrl_amd_safe(0xc001100d, val);
			}
		}

	}
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get the apicid instead of the initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();
#else

	/*
	 * FIXME: We should handle the K5 here. Set up the write
	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 * no bus pipeline).
	 */

	switch (c->x86) {
	case 4:
		init_amd_k5(c);
		break;
	case 5:
		init_amd_k6(c);
		break;
	case 6:
		init_amd_k7(c);
		break;
	}

	/* K6s report MCE capability but don't have all the MCE MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);
#endif

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	if (!c->x86_model_id[0]) {
		switch (c->x86) {
		case 0xf:
			/*
			 * Should distinguish models here, but this is only
			 * a fallback anyway.
			 */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}

	display_cacheinfo(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	if (c->extended_cpuid_level >= 0x80000006) {
		if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000))
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}

	if (c->x86 >= 0xf && c->x86 <= 0x11)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

#ifdef CONFIG_X86_64
	if (c->x86 == 0x10) {
		/* do this only for the boot cpu */
		if (c == &boot_cpu_data)
			check_enable_amd_mmconf_dmi();

		fam10h_check_enable_mmcfg();
	}

	if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
		unsigned long long tseg;

		/*
		 * Split up the direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems to be very
		 * little benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
			if ((tseg>>PMD_SHIFT) <
				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
				((tseg>>PMD_SHIFT) <
				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
				(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif
}

#ifdef CONFIG_X86_32
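/*
 * Fix up the reported L2 cache size on early K7 steppings that misreport
 * it (AMD errata T13).
 */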
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
							unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.c_size_cache	= amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);
