#include <common.h>
#include <command.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <syscon.h>
#include <asm/control_regs.h>
#include <asm/coreboot_tables.h>
#include <asm/cpu.h>
#include <asm/lapic.h>
#include <asm/microcode.h>
#include <asm/mp.h>
#include <asm/mrccache.h>
#include <asm/msr.h>
#include <asm/mtrr.h>
#include <asm/post.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/interrupt.h>
#include <asm/tables.h>
#include <linux/compiler.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * Constructor for a conventional segment GDT (or LDT) entry.
 * This is a macro so it can be used in initialisers.
 */
#define GDT_ENTRY(flags, base, limit)			\
	((((base)  & 0xff000000ULL) << (56-24)) |	\
	 (((flags) & 0x0000f0ffULL) << 40) |		\
	 (((limit) & 0x000f0000ULL) << (48-16)) |	\
	 (((base)  & 0x00ffffffULL) << 16) |		\
	 (((limit) & 0x0000ffffULL)))

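/*
 * For example, GDT_ENTRY(0xc09b, 0, 0xfffff) evaluates to
 * 0x00cf9b000000ffff: a flat 32-bit code segment with base 0, a 4 GB
 * page-granular limit, present, DPL 0, execute/read.
 */

/* 48-bit pseudo-descriptor: the operand format expected by lgdt/sgdt */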
struct gdt_ptr {
	u16 len;
	u32 ptr;
} __packed;

struct cpu_device_id {
	unsigned vendor;
	unsigned device;
};

struct cpuinfo_x86 {
	uint8_t x86;
	uint8_t x86_vendor;
	uint8_t x86_model;
	uint8_t x86_mask;
};

/*
 * List of CPU vendor strings along with their normalized
 * id values.
 */
static const struct {
	int vendor;
	const char *name;
} x86_vendors[] = {
	{ X86_VENDOR_INTEL,     "GenuineIntel", },
	{ X86_VENDOR_CYRIX,     "CyrixInstead", },
	{ X86_VENDOR_AMD,       "AuthenticAMD", },
	{ X86_VENDOR_UMC,       "UMC UMC UMC ", },
	{ X86_VENDOR_NEXGEN,    "NexGenDriven", },
	{ X86_VENDOR_CENTAUR,   "CentaurHauls", },
	{ X86_VENDOR_RISE,      "RiseRiseRise", },
	{ X86_VENDOR_TRANSMETA, "GenuineTMx86", },
	{ X86_VENDOR_TRANSMETA, "TransmetaCPU", },
	{ X86_VENDOR_NSC,       "Geode by NSC", },
	{ X86_VENDOR_SIS,       "SiS SiS SiS ", },
};

static const char *const x86_vendor_name[] = {
	[X86_VENDOR_INTEL]     = "Intel",
	[X86_VENDOR_CYRIX]     = "Cyrix",
	[X86_VENDOR_AMD]       = "AMD",
	[X86_VENDOR_UMC]       = "UMC",
	[X86_VENDOR_NEXGEN]    = "NexGen",
	[X86_VENDOR_CENTAUR]   = "Centaur",
	[X86_VENDOR_RISE]      = "Rise",
	[X86_VENDOR_TRANSMETA] = "Transmeta",
	[X86_VENDOR_NSC]       = "NSC",
	[X86_VENDOR_SIS]       = "SiS",
};

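/*
 * Each load_xx() helper below reloads one segment register. The selector
 * value is the GDT entry number scaled by the descriptor size
 * (X86_GDT_ENTRY_SIZE).
 */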
static void load_ds(u32 segment)
{
	asm volatile("movl %0, %%ds" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_es(u32 segment)
{
	asm volatile("movl %0, %%es" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_fs(u32 segment)
{
	asm volatile("movl %0, %%fs" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_gs(u32 segment)
{
	asm volatile("movl %0, %%gs" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_ss(u32 segment)
{
	asm volatile("movl %0, %%ss" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

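/*
 * Load a new GDT: lgdt takes the table size in bytes minus one as the
 * limit, plus the linear base address of the table.
 */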
static void load_gdt(const u64 *boot_gdt, u16 num_entries)
{
	struct gdt_ptr gdt;

	gdt.len = (num_entries * X86_GDT_ENTRY_SIZE) - 1;
	gdt.ptr = (u32)boot_gdt;

	asm volatile("lgdtl %0\n" : : "m" (gdt));
}

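/*
 * Set up U-Boot's working GDT and reload the segment registers. The FS
 * descriptor is based at the global data pointer so that gd can be
 * reached through %fs from any context.
 */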
void arch_setup_gd(gd_t *new_gd)
{
	u64 *gdt_addr;

	gdt_addr = new_gd->arch.gdt;

	/*
	 * CS: code, read/execute, 4 GB, base 0
	 *
	 * Some OSes (like VxWorks) require GDT entry 1 to be the 32-bit CS
	 */
	gdt_addr[X86_GDT_ENTRY_UNUSED] = GDT_ENTRY(0xc09b, 0, 0xfffff);
	gdt_addr[X86_GDT_ENTRY_32BIT_CS] = GDT_ENTRY(0xc09b, 0, 0xfffff);

	/* DS: data, read/write, 4 GB, base 0 */
	gdt_addr[X86_GDT_ENTRY_32BIT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff);

	/* FS: data, read/write, 4 GB, base (Global Data Pointer) */
	new_gd->arch.gd_addr = new_gd;
	gdt_addr[X86_GDT_ENTRY_32BIT_FS] = GDT_ENTRY(0xc093,
			(ulong)&new_gd->arch.gd_addr, 0xfffff);

	/* 16-bit CS: code, read/execute, 64 kB, base 0 */
	gdt_addr[X86_GDT_ENTRY_16BIT_CS] = GDT_ENTRY(0x009b, 0, 0x0ffff);

	/* 16-bit DS: data, read/write, 64 kB, base 0 */
	gdt_addr[X86_GDT_ENTRY_16BIT_DS] = GDT_ENTRY(0x0093, 0, 0x0ffff);

	gdt_addr[X86_GDT_ENTRY_16BIT_FLAT_CS] = GDT_ENTRY(0x809b, 0, 0xfffff);
	gdt_addr[X86_GDT_ENTRY_16BIT_FLAT_DS] = GDT_ENTRY(0x8093, 0, 0xfffff);

	load_gdt(gdt_addr, X86_GDT_NUM_ENTRIES);
	load_ds(X86_GDT_ENTRY_32BIT_DS);
	load_es(X86_GDT_ENTRY_32BIT_DS);
	load_gs(X86_GDT_ENTRY_32BIT_DS);
	load_ss(X86_GDT_ENTRY_32BIT_DS);
	load_fs(X86_GDT_ENTRY_32BIT_FS);
}

#ifdef CONFIG_HAVE_FSP
/*
 * Set up the FSP execution environment GDT
 *
 * Per the Intel FSP external architecture specification, before calling
 * any FSP API we need to make sure the system is in flat 32-bit mode and
 * that both the code and data selectors have full 4 GB access range.
 * Here we reuse the GDT from arch/x86/cpu/start16.S and reload the
 * segment registers.
 */
void setup_fsp_gdt(void)
{
	load_gdt((const u64 *)(gdt_rom + CONFIG_RESET_SEG_START), 4);
	load_ds(X86_GDT_ENTRY_32BIT_DS);
	load_ss(X86_GDT_ENTRY_32BIT_DS);
	load_es(X86_GDT_ENTRY_32BIT_DS);
	load_fs(X86_GDT_ENTRY_32BIT_DS);
	load_gs(X86_GDT_ENTRY_32BIT_DS);
}
#endif

int __weak x86_cleanup_before_linux(void)
{
#ifdef CONFIG_BOOTSTAGE_STASH
	bootstage_stash((void *)CONFIG_BOOTSTAGE_STASH_ADDR,
			CONFIG_BOOTSTAGE_STASH_SIZE);
#endif

	return 0;
}

/*
 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
 * by the fact that they preserve the flags across the division of 5/2.
 * PII and PPro exhibit this behavior too, but they have cpuid available.
 */

/*
 * Perform the Cyrix 5/2 test. A Cyrix won't change
 * the flags, while other 486 chips will.
 */
static inline int test_cyrix_52div(void)
{
	unsigned int test;

	__asm__ __volatile__(
		"sahf\n\t"	/* clear flags (%eax = 0x0005) */
		"div %b2\n\t"	/* divide 5 by 2 */
		"lahf"		/* store flags into %ah */
		: "=a" (test)
		: "0" (5), "q" (2)
		: "cc");

	/* AH is 0x02 on Cyrix after the divide.. */
	return (unsigned char)(test >> 8) == 0x02;
}

/*
 * Detect a NexGen CPU running without BIOS hypercode new enough
 * to have CPUID. (Thanks to Herbert Oppmann)
 */
static int deep_magic_nexgen_probe(void)
{
	int ret;

	__asm__ __volatile__ (
		"	movw	$0x5555, %%ax\n"
		"	xorw	%%dx,%%dx\n"
		"	movw	$2, %%cx\n"
		"	divw	%%cx\n"
		"	movl	$0, %%eax\n"
		"	jnz	1f\n"
		"	movl	$1, %%eax\n"
		"1:\n"
		: "=a" (ret) : : "cx", "dx");
	return ret;
}

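/* The ID flag (bit 21) in EFLAGS can be toggled only if CPUID is supported */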
static bool has_cpuid(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

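/* CPUID leaf 1, EDX bit 12 indicates MTRR support */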
static bool has_mtrr(void)
{
	return cpuid_edx(0x00000001) & (1 << 12) ? true : false;
}

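/*
 * Read the 12-character vendor string from CPUID leaf 0 (EBX, EDX, ECX,
 * in that order) into vendor_name; returns the highest supported leaf.
 */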
static int build_vendor_name(char *vendor_name)
{
	unsigned int *name_as_ints = (unsigned int *)vendor_name;
	struct cpuid_result result;

	result = cpuid(0x00000000);
	name_as_ints[0] = result.ebx;
	name_as_ints[1] = result.edx;
	name_as_ints[2] = result.ecx;

	return result.eax;
}

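/* Work out the vendor and device (family/model/stepping word) of this CPU */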
static void identify_cpu(struct cpu_device_id *cpu)
{
	char vendor_name[16];
	int i;

	vendor_name[0] = '\0'; /* Unset */
	cpu->device = 0; /* fix gcc 4.4.4 warning */

	/* Find the id and vendor_name */
	if (!has_cpuid()) {
		/* It's a 486 if we can modify the AC flag */
		if (flag_is_changeable_p(X86_EFLAGS_AC))
			cpu->device = 0x00000400; /* 486 */
		else
			cpu->device = 0x00000300; /* 386 */
		if ((cpu->device == 0x00000400) && test_cyrix_52div()) {
			memcpy(vendor_name, "CyrixInstead", 13);
			/* If we ever care we can enable cpuid here */
		}
		/* Detect NexGen with old hypercode */
		else if (deep_magic_nexgen_probe())
			memcpy(vendor_name, "NexGenDriven", 13);
	}
	if (has_cpuid()) {
		int cpuid_level;

		cpuid_level = build_vendor_name(vendor_name);
		vendor_name[12] = '\0';

		/* Intel-defined flags: level 0x00000001 */
		if (cpuid_level >= 0x00000001) {
			cpu->device = cpuid_eax(0x00000001);
		} else {
			/* Have CPUID level 0 only - unheard of */
			cpu->device = 0x00000400;
		}
	}
	cpu->vendor = X86_VENDOR_UNKNOWN;
	for (i = 0; i < ARRAY_SIZE(x86_vendors); i++) {
		if (memcmp(vendor_name, x86_vendors[i].name, 12) == 0) {
			cpu->vendor = x86_vendors[i].vendor;
			break;
		}
	}
}

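/*
 * Decode family/model/stepping from the CPUID leaf 1 EAX value: the
 * extended family is added for family 0xf, and the extended model is
 * prepended for family 6 and above.
 */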
static inline void get_fms(struct cpuinfo_x86 *c, uint32_t tfms)
{
	c->x86 = (tfms >> 8) & 0xf;
	c->x86_model = (tfms >> 4) & 0xf;
	c->x86_mask = tfms & 0xf;
	if (c->x86 == 0xf)
		c->x86 += (tfms >> 20) & 0xff;
	if (c->x86 >= 0x6)
		c->x86_model += ((tfms >> 16) & 0xf) << 4;
}

u32 cpu_get_family_model(void)
{
	return gd->arch.x86_device & 0x0fff0ff0;
}

u32 cpu_get_stepping(void)
{
	return gd->arch.x86_mask;
}

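/*
 * Basic CPU init: set up the FPU and CR0 flags, identify the CPU via
 * CPUID and configure the fixed-range MTRRs for some legacy regions.
 */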
int x86_cpu_init_f(void)
{
	const u32 em_rst = ~X86_CR0_EM;
	const u32 mp_ne_set = X86_CR0_MP | X86_CR0_NE;

	if (ll_boot_init()) {
		/* initialize FPU, reset EM, set MP and NE */
		asm ("fninit\n" \
		     "movl %%cr0, %%eax\n" \
		     "andl %0, %%eax\n" \
		     "orl %1, %%eax\n" \
		     "movl %%eax, %%cr0\n" \
		     : : "i" (em_rst), "i" (mp_ne_set) : "eax");
	}

	/* identify CPU via cpuid and store the decoded info into gd->arch */
	if (has_cpuid()) {
		struct cpu_device_id cpu;
		struct cpuinfo_x86 c;

		identify_cpu(&cpu);
		get_fms(&c, cpu.device);
		gd->arch.x86 = c.x86;
		gd->arch.x86_vendor = cpu.vendor;
		gd->arch.x86_model = c.x86_model;
		gd->arch.x86_mask = c.x86_mask;
		gd->arch.x86_device = cpu.device;

		gd->arch.has_mtrr = has_mtrr();
	}

	/* Don't allow PCI region 3 to use memory in the 2-4GB memory hole */
	gd->pci_ram_top = 0x80000000U;

	/* Configure fixed range MTRRs for some legacy regions */
	if (gd->arch.has_mtrr) {
		u64 mtrr_cap;

		mtrr_cap = native_read_msr(MTRR_CAP_MSR);
		if (mtrr_cap & MTRR_CAP_FIX) {
			/* Mark the VGA RAM area as uncacheable */
			native_write_msr(MTRR_FIX_16K_A0000_MSR,
					 MTRR_FIX_TYPE(MTRR_TYPE_UNCACHEABLE),
					 MTRR_FIX_TYPE(MTRR_TYPE_UNCACHEABLE));

			/*
			 * Mark the PCI ROM area as cacheable to improve ROM
			 * execution performance.
			 */
			native_write_msr(MTRR_FIX_4K_C0000_MSR,
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
			native_write_msr(MTRR_FIX_4K_C8000_MSR,
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
			native_write_msr(MTRR_FIX_4K_D0000_MSR,
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
			native_write_msr(MTRR_FIX_4K_D8000_MSR,
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));

			/* Enable the fixed range MTRRs */
			msr_setbits_64(MTRR_DEF_TYPE_MSR, MTRR_DEF_TYPE_FIX_EN);
		}
	}

#ifdef CONFIG_I8254_TIMER
	/* Set up the i8254 timer if required */
	i8254_init();
#endif

	return 0;
}

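/*
 * Cache control works through the CD (cache disable) and NW (not
 * write-through) bits in CR0; wbinvd flushes and invalidates the caches.
 */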
void x86_enable_caches(void)
{
	unsigned long cr0;

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_NW | X86_CR0_CD);
	write_cr0(cr0);
	wbinvd();
}
void enable_caches(void) __attribute__((weak, alias("x86_enable_caches")));

void x86_disable_caches(void)
{
	unsigned long cr0;

	cr0 = read_cr0();
	cr0 |= X86_CR0_NW | X86_CR0_CD;
	wbinvd();
	write_cr0(cr0);
	wbinvd();
}
void disable_caches(void) __attribute__((weak, alias("x86_disable_caches")));

int x86_init_cache(void)
{
	enable_caches();

	return 0;
}
int init_cache(void) __attribute__((weak, alias("x86_init_cache")));

int do_reset(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
	printf("resetting ...\n");

	/* wait 50 ms */
	udelay(50000);
	disable_interrupts();
	reset_cpu(0);

	/*NOTREACHED*/
	return 0;
}

void flush_cache(unsigned long dummy1, unsigned long dummy2)
{
	asm("wbinvd\n");
}

__weak void reset_cpu(ulong addr)
{
	/* Do a hard reset through the chipset's reset control register */
	outb(SYS_RST | RST_CPU, IO_PORT_RESET);
	for (;;)
		cpu_hlt();
}

void x86_full_reset(void)
{
	outb(FULL_RST | SYS_RST | RST_CPU, IO_PORT_RESET);
}

int dcache_status(void)
{
	return !(read_cr0() & X86_CR0_CD);
}

/*
 * Define these so the generic cache interface is complete; x86 keeps
 * its caches coherent in hardware, so per-range dcache operations do
 * not need to do anything.
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
}

void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void dcache_enable(void)
{
	enable_caches();
}

void dcache_disable(void)
{
	disable_caches();
}

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 1;
}

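/*
 * Enable PAE paging with the page-table root in cr3. Bit 5 (0x20) of
 * CR4 is the PAE flag; bit 31 (0x80000000) of CR0 enables paging.
 */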
void cpu_enable_paging_pae(ulong cr3)
{
	__asm__ __volatile__(
		/* Load the page table address */
		"movl	%0, %%cr3\n"
		/* Enable pae */
		"movl	%%cr4, %%eax\n"
		"orl	$0x00000020, %%eax\n"
		"movl	%%eax, %%cr4\n"
		/* Enable paging */
		"movl	%%cr0, %%eax\n"
		"orl	$0x80000000, %%eax\n"
		"movl	%%eax, %%cr0\n"
		:
		: "r" (cr3)
		: "eax");
}

void cpu_disable_paging_pae(void)
{
	/* Turn off paging, then clear the PAE bit */
	__asm__ __volatile__ (
		/* Disable paging */
		"movl	%%cr0, %%eax\n"
		"andl	$0x7fffffff, %%eax\n"
		"movl	%%eax, %%cr0\n"
		/* Disable pae */
		"movl	%%cr4, %%eax\n"
		"andl	$0xffffffdf, %%eax\n"
		"movl	%%eax, %%cr4\n"
		:
		:
		: "eax");
}

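/* Long mode is only reported via extended CPUID leaves above 0x80000000 */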
static bool can_detect_long_mode(void)
{
	return cpuid_eax(0x80000000) > 0x80000000UL;
}

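/* CPUID leaf 0x80000001, EDX bit 29 is the long mode (LM) flag */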
static bool has_long_mode(void)
{
	return cpuid_edx(0x80000001) & (1 << 29) ? true : false;
}

int cpu_has_64bit(void)
{
	return has_cpuid() && can_detect_long_mode() &&
		has_long_mode();
}

const char *cpu_vendor_name(int vendor)
{
	const char *name;

	name = "<invalid cpu vendor>";
	if (vendor < ARRAY_SIZE(x86_vendor_name) &&
	    x86_vendor_name[vendor])
		name = x86_vendor_name[vendor];

	return name;
}

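/*
 * Read the 48-character brand string from CPUID leaves 0x80000002-4.
 * The buffer must hold at least CPU_MAX_NAME_LEN bytes; the returned
 * pointer skips any leading spaces in the string.
 */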
char *cpu_get_name(char *name)
{
	unsigned int *name_as_ints = (unsigned int *)name;
	struct cpuid_result regs;
	char *ptr;
	int i;

	/* This bit adds up to 48 bytes */
	for (i = 0; i < 3; i++) {
		regs = cpuid(0x80000002 + i);
		name_as_ints[i * 4 + 0] = regs.eax;
		name_as_ints[i * 4 + 1] = regs.ebx;
		name_as_ints[i * 4 + 2] = regs.ecx;
		name_as_ints[i * 4 + 3] = regs.edx;
	}
	name[CPU_MAX_NAME_LEN - 1] = '\0';

	/* Skip leading spaces */
	ptr = name;
	while (*ptr == ' ')
		ptr++;

	return ptr;
}

int default_print_cpuinfo(void)
{
	printf("CPU: %s, vendor %s, device %xh\n",
	       cpu_has_64bit() ? "x86_64" : "x86",
	       cpu_vendor_name(gd->arch.x86_vendor), gd->arch.x86_device);

	return 0;
}

#define PAGETABLE_SIZE		(6 * 4096)

/**
 * build_pagetable() - build a flat 4GiB page table structure for 64-bit mode
 *
 * @pgtable: Pointer to a 24KiB block of memory
 */
static void build_pagetable(uint32_t *pgtable)
{
	uint i;

	memset(pgtable, '\0', PAGETABLE_SIZE);

	/* Level 4 needs a single entry */
	pgtable[0] = (uint32_t)&pgtable[1024] + 7;

	/* Level 3 has four 64-bit entries, one for each GiB of memory */
	for (i = 0; i < 4; i++) {
		pgtable[1024 + i * 2] = (uint32_t)&pgtable[2048] +
							0x1000 * i + 7;
	}

	/* Level 2 has 2048 64-bit entries, each representing 2MiB */
	for (i = 0; i < 2048; i++)
		pgtable[2048 + i * 2] = 0x183 + (i << 21UL);
}

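/*
 * Jump to a 64-bit entry point using a freshly built identity-mapped
 * page table. cpu_call64() does not return on success, so reaching the
 * final return means the jump failed, hence -EFAULT.
 */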
int cpu_jump_to_64bit(ulong setup_base, ulong target)
{
	uint32_t *pgtable;

	pgtable = memalign(4096, PAGETABLE_SIZE);
	if (!pgtable)
		return -ENOMEM;

	build_pagetable(pgtable);
	cpu_call64((ulong)pgtable, setup_base, target);
	free(pgtable);

	return -EFAULT;
}

void show_boot_progress(int val)
{
	outb(val, POST_PORT);
}

#ifndef CONFIG_SYS_COREBOOT
/*
 * Implement a weak default function for boards that optionally
 * need to clean up the system before jumping to the kernel.
 */
__weak void board_final_cleanup(void)
{
}

int last_stage_init(void)
{
	write_tables();

	board_final_cleanup();

	return 0;
}
#endif

#ifdef CONFIG_SMP
static int enable_smis(struct udevice *cpu, void *unused)
{
	return 0;
}

static struct mp_flight_record mp_steps[] = {
	MP_FR_BLOCK_APS(mp_init_cpu, NULL, mp_init_cpu, NULL),
	/* Wait for APs to finish initialization before proceeding */
	MP_FR_BLOCK_APS(NULL, NULL, enable_smis, NULL),
};

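/* Bring up the application processors using the flight plan above */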
static int x86_mp_init(void)
{
	struct mp_params mp_params;

	mp_params.parallel_microcode_load = 0;
	mp_params.flight_plan = &mp_steps[0];
	mp_params.num_records = ARRAY_SIZE(mp_steps);
	mp_params.microcode_pointer = 0;

	if (mp_init(&mp_params)) {
		printf("Warning: MP init failure\n");
		return -EIO;
	}

	return 0;
}
#endif

static int x86_init_cpus(void)
{
#ifdef CONFIG_SMP
	debug("Init additional CPUs\n");
	x86_mp_init();
#else
	struct udevice *dev;

	/*
	 * This causes the cpu-x86 driver to be probed.
	 * We don't check the return value here, to allow boards which have
	 * not been converted to use the cpu uclass driver to boot.
	 */
	uclass_first_device(UCLASS_CPU, &dev);
#endif

	return 0;
}

int cpu_init_r(void)
{
	struct udevice *dev;
	int ret;

	if (!ll_boot_init())
		return 0;

	ret = x86_init_cpus();
	if (ret)
		return ret;

	/*
	 * Set up the northbridge, PCH and LPC if available. Note that these
	 * may have had some limited pre-relocation init if they were probed
	 * before relocation, but this is post-relocation.
	 */
	uclass_first_device(UCLASS_NORTHBRIDGE, &dev);
	uclass_first_device(UCLASS_PCH, &dev);
	uclass_first_device(UCLASS_LPC, &dev);

	/* Set up pin control if available */
	ret = syscon_get_by_driver_data(X86_SYSCON_PINCONF, &dev);
	debug("%s, pinctrl=%p, ret=%d\n", __func__, dev, ret);

	return 0;
}

#ifndef CONFIG_EFI_STUB
int reserve_arch(void)
{
#ifdef CONFIG_ENABLE_MRC_CACHE
	mrccache_reserve();
#endif

#ifdef CONFIG_SEABIOS
	high_table_reserve();
#endif

	return 0;
}
#endif