#include <common.h>
#include <command.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <asm/control_regs.h>
#include <asm/cpu.h>
#include <asm/lapic.h>
#include <asm/mp.h>
#include <asm/msr.h>
#include <asm/mtrr.h>
#include <asm/post.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/interrupt.h>
#include <asm/tables.h>
#include <linux/compiler.h>

DECLARE_GLOBAL_DATA_PTR;
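
/*
 * Constructor for a conventional segment GDT (or LDT) entry.
 * This is a macro so it can be used in initialisers.
 */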
#define GDT_ENTRY(flags, base, limit)			\
	((((base)  & 0xff000000ULL) << (56-24)) |	\
	 (((flags) & 0x0000f0ffULL) << 40) |		\
	 (((limit) & 0x000f0000ULL) << (48-16)) |	\
	 (((base)  & 0x00ffffffULL) << 16) |		\
	 (((limit) & 0x0000ffffULL)))

struct gdt_ptr {
	u16 len;
	u32 ptr;
} __packed;

struct cpu_device_id {
	unsigned vendor;
	unsigned device;
};

struct cpuinfo_x86 {
	uint8_t x86;		/* CPU family */
	uint8_t x86_vendor;	/* CPU vendor */
	uint8_t x86_model;
	uint8_t x86_mask;	/* stepping */
};
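
/*
 * List of CPU vendor strings along with their normalised
 * id values.
 */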
static struct {
	int vendor;
	const char *name;
} x86_vendors[] = {
	{ X86_VENDOR_INTEL,     "GenuineIntel", },
	{ X86_VENDOR_CYRIX,     "CyrixInstead", },
	{ X86_VENDOR_AMD,       "AuthenticAMD", },
	{ X86_VENDOR_UMC,       "UMC UMC UMC ", },
	{ X86_VENDOR_NEXGEN,    "NexGenDriven", },
	{ X86_VENDOR_CENTAUR,   "CentaurHauls", },
	{ X86_VENDOR_RISE,      "RiseRiseRise", },
	{ X86_VENDOR_TRANSMETA, "GenuineTMx86", },
	{ X86_VENDOR_TRANSMETA, "TransmetaCPU", },
	{ X86_VENDOR_NSC,       "Geode by NSC", },
	{ X86_VENDOR_SIS,       "SiS SiS SiS ", },
};

static const char *const x86_vendor_name[] = {
	[X86_VENDOR_INTEL]     = "Intel",
	[X86_VENDOR_CYRIX]     = "Cyrix",
	[X86_VENDOR_AMD]       = "AMD",
	[X86_VENDOR_UMC]       = "UMC",
	[X86_VENDOR_NEXGEN]    = "NexGen",
	[X86_VENDOR_CENTAUR]   = "Centaur",
	[X86_VENDOR_RISE]      = "Rise",
	[X86_VENDOR_TRANSMETA] = "Transmeta",
	[X86_VENDOR_NSC]       = "NSC",
	[X86_VENDOR_SIS]       = "SiS",
};
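
/*
 * Load a segment register with the selector for the given GDT entry
 * (entry index * X86_GDT_ENTRY_SIZE gives the selector value).
 */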
static void load_ds(u32 segment)
{
	asm volatile("movl %0, %%ds" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_es(u32 segment)
{
	asm volatile("movl %0, %%es" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_fs(u32 segment)
{
	asm volatile("movl %0, %%fs" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_gs(u32 segment)
{
	asm volatile("movl %0, %%gs" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_ss(u32 segment)
{
	asm volatile("movl %0, %%ss" : : "r" (segment * X86_GDT_ENTRY_SIZE));
}

static void load_gdt(const u64 *boot_gdt, u16 num_entries)
{
	struct gdt_ptr gdt;

	/* The GDT limit is one byte less than the size of the table */
	gdt.len = (num_entries * X86_GDT_ENTRY_SIZE) - 1;
	gdt.ptr = (u32)boot_gdt;

	asm volatile("lgdtl %0\n" : : "m" (gdt));
}

void arch_setup_gd(gd_t *new_gd)
{
	u64 *gdt_addr;

	gdt_addr = new_gd->arch.gdt;

	/* CS: code, read/execute, 4 GB, base 0 */
	gdt_addr[X86_GDT_ENTRY_32BIT_CS] = GDT_ENTRY(0xc09b, 0, 0xfffff);

	/* DS: data, read/write, 4 GB, base 0 */
	gdt_addr[X86_GDT_ENTRY_32BIT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff);

	/* FS: data, read/write, 4 GB, base (Global Data Pointer) */
	new_gd->arch.gd_addr = new_gd;
	gdt_addr[X86_GDT_ENTRY_32BIT_FS] = GDT_ENTRY(0xc093,
			(ulong)&new_gd->arch.gd_addr, 0xfffff);

	/* 16-bit CS: code, read/execute, 64 kB, base 0 */
	gdt_addr[X86_GDT_ENTRY_16BIT_CS] = GDT_ENTRY(0x009b, 0, 0x0ffff);

	/* 16-bit DS: data, read/write, 64 kB, base 0 */
	gdt_addr[X86_GDT_ENTRY_16BIT_DS] = GDT_ENTRY(0x0093, 0, 0x0ffff);

	gdt_addr[X86_GDT_ENTRY_16BIT_FLAT_CS] = GDT_ENTRY(0x809b, 0, 0xfffff);
	gdt_addr[X86_GDT_ENTRY_16BIT_FLAT_DS] = GDT_ENTRY(0x8093, 0, 0xfffff);

	load_gdt(gdt_addr, X86_GDT_NUM_ENTRIES);
	load_ds(X86_GDT_ENTRY_32BIT_DS);
	load_es(X86_GDT_ENTRY_32BIT_DS);
	load_gs(X86_GDT_ENTRY_32BIT_DS);
	load_ss(X86_GDT_ENTRY_32BIT_DS);
	load_fs(X86_GDT_ENTRY_32BIT_FS);
}

#ifdef CONFIG_HAVE_FSP
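/*
 * Set up the FSP execution environment GDT
 *
 * Before calling any FSP API the system must be in flat 32-bit mode with
 * both the code and data selectors covering the full 4GB address range.
 * Reload the GDT from the copy kept in the reset segment and reset all
 * segment registers accordingly.
 */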
void setup_fsp_gdt(void)
{
	load_gdt((const u64 *)(gdt_rom + CONFIG_RESET_SEG_START), 4);
	load_ds(X86_GDT_ENTRY_32BIT_DS);
	load_ss(X86_GDT_ENTRY_32BIT_DS);
	load_es(X86_GDT_ENTRY_32BIT_DS);
	load_fs(X86_GDT_ENTRY_32BIT_DS);
	load_gs(X86_GDT_ENTRY_32BIT_DS);
}
#endif
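
/* Weak hook called before handing over to Linux; stash bootstage data if enabled */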
int __weak x86_cleanup_before_linux(void)
{
#ifdef CONFIG_BOOTSTAGE_STASH
	bootstage_stash((void *)CONFIG_BOOTSTAGE_STASH_ADDR,
			CONFIG_BOOTSTAGE_STASH_SIZE);
#endif

	return 0;
}
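
/*
 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
 * by the fact that they preserve the flags across a division of 5/2.
 * PII and PPro exhibit this behavior too, but they have cpuid available.
 *
 * Perform the Cyrix 5/2 test: a Cyrix won't change the flags, while other
 * 486-class chips will.
 */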
static inline int test_cyrix_52div(void)
{
	unsigned int test;

	__asm__ __volatile__(
	     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
	     "div %b2\n\t"	/* divide 5 by 2 */
	     "lahf"		/* store flags into %ah */
	     : "=a" (test)
	     : "0" (5), "q" (2)
	     : "cc");

	/* AH is 0x02 on Cyrix after the divide, since the flags survived it */
	return (unsigned char)(test >> 8) == 0x02;
}
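
/*
 * Detect a NexGen CPU running without BIOS hypercode new enough
 * to have CPUID (thanks to Herbert Oppmann).
 */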
static int deep_magic_nexgen_probe(void)
{
	int ret;

	__asm__ __volatile__ (
		"	movw	$0x5555, %%ax\n"
		"	xorw	%%dx,%%dx\n"
		"	movw	$2, %%cx\n"
		"	divw	%%cx\n"
		"	movl	$0, %%eax\n"
		"	jnz	1f\n"
		"	movl	$1, %%eax\n"
		"1:\n"
		: "=a" (ret) : : "cx", "dx");
	return ret;
}

static bool has_cpuid(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static bool has_mtrr(void)
{
	/* MTRR support is CPUID.01H:EDX bit 12 */
	return cpuid_edx(0x00000001) & (1 << 12) ? true : false;
}

static int build_vendor_name(char *vendor_name)
{
	struct cpuid_result result;
	unsigned int *name_as_ints = (unsigned int *)vendor_name;

	result = cpuid(0x00000000);
	name_as_ints[0] = result.ebx;
	name_as_ints[1] = result.edx;
	name_as_ints[2] = result.ecx;

	return result.eax;
}

static void identify_cpu(struct cpu_device_id *cpu)
{
	char vendor_name[16];
	int i;

	vendor_name[0] = '\0';
	cpu->device = 0;

	/* Find the id and vendor_name */
	if (!has_cpuid()) {
		/* It's a 486 if we can modify the AC flag */
		if (flag_is_changeable_p(X86_EFLAGS_AC))
			cpu->device = 0x00000400; /* 486 */
		else
			cpu->device = 0x00000300; /* 386 */
		if ((cpu->device == 0x00000400) && test_cyrix_52div()) {
			memcpy(vendor_name, "CyrixInstead", 13);
		} else if (deep_magic_nexgen_probe()) {
			/* Detect NexGen with old hypercode */
			memcpy(vendor_name, "NexGenDriven", 13);
		}
	}
	if (has_cpuid()) {
		int cpuid_level;

		cpuid_level = build_vendor_name(vendor_name);
		vendor_name[12] = '\0';

		/* Intel-defined flags: level 0x00000001 */
		if (cpuid_level >= 0x00000001) {
			cpu->device = cpuid_eax(0x00000001);
		} else {
			/* Have CPUID level 0 only - unheard of */
			cpu->device = 0x00000400;
		}
	}
	cpu->vendor = X86_VENDOR_UNKNOWN;
	for (i = 0; i < ARRAY_SIZE(x86_vendors); i++) {
		if (memcmp(vendor_name, x86_vendors[i].name, 12) == 0) {
			cpu->vendor = x86_vendors[i].vendor;
			break;
		}
	}
}
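
/*
 * Extract the family, model and stepping from the CPU "type/family/model/
 * stepping" word (CPUID.01H:EAX), applying the extended-family and
 * extended-model adjustments.
 */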
static inline void get_fms(struct cpuinfo_x86 *c, uint32_t tfms)
{
	c->x86 = (tfms >> 8) & 0xf;
	c->x86_model = (tfms >> 4) & 0xf;
	c->x86_mask = tfms & 0xf;
	if (c->x86 == 0xf)
		c->x86 += (tfms >> 20) & 0xff;
	if (c->x86 >= 0x6)
		c->x86_model += ((tfms >> 16) & 0xf) << 4;
}

int x86_cpu_init_f(void)
{
	const u32 em_rst = ~X86_CR0_EM;
	const u32 mp_ne_set = X86_CR0_MP | X86_CR0_NE;

	if (ll_boot_init()) {
		/* initialize FPU, reset EM, set MP and NE */
		asm ("fninit\n" \
		     "movl %%cr0, %%eax\n" \
		     "andl %0, %%eax\n" \
		     "orl  %1, %%eax\n" \
		     "movl %%eax, %%cr0\n" \
		     : : "i" (em_rst), "i" (mp_ne_set) : "eax");
	}

	/* Identify the CPU via cpuid and store the decoded info in gd->arch */
	if (has_cpuid()) {
		struct cpu_device_id cpu;
		struct cpuinfo_x86 c;

		identify_cpu(&cpu);
		get_fms(&c, cpu.device);
		gd->arch.x86 = c.x86;
		gd->arch.x86_vendor = cpu.vendor;
		gd->arch.x86_model = c.x86_model;
		gd->arch.x86_mask = c.x86_mask;
		gd->arch.x86_device = cpu.device;

		gd->arch.has_mtrr = has_mtrr();
	}

	/* Don't allow PCI regions to use memory in the 2GB memory hole */
	gd->pci_ram_top = 0x80000000U;

	/* Configure fixed range MTRRs for the legacy regions */
	if (gd->arch.has_mtrr) {
		u64 mtrr_cap;

		mtrr_cap = native_read_msr(MTRR_CAP_MSR);
		if (mtrr_cap & MTRR_CAP_FIX) {
			/* Mark the VGA RAM area as uncacheable */
			native_write_msr(MTRR_FIX_16K_A0000_MSR,
					 MTRR_FIX_TYPE(MTRR_TYPE_UNCACHEABLE),
					 MTRR_FIX_TYPE(MTRR_TYPE_UNCACHEABLE));

			/*
			 * Mark the PCI ROM area as cacheable to improve ROM
			 * execution performance.
			 */
			native_write_msr(MTRR_FIX_4K_C0000_MSR,
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
			native_write_msr(MTRR_FIX_4K_C8000_MSR,
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
			native_write_msr(MTRR_FIX_4K_D0000_MSR,
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
			native_write_msr(MTRR_FIX_4K_D8000_MSR,
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));

			/* Enable the fixed range MTRRs */
			msr_setbits_64(MTRR_DEF_TYPE_MSR, MTRR_DEF_TYPE_FIX_EN);
		}
	}

	return 0;
}

void x86_enable_caches(void)
{
	unsigned long cr0;

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_NW | X86_CR0_CD);
	write_cr0(cr0);
	wbinvd();
}
void enable_caches(void) __attribute__((weak, alias("x86_enable_caches")));

void x86_disable_caches(void)
{
	unsigned long cr0;

	cr0 = read_cr0();
	cr0 |= X86_CR0_NW | X86_CR0_CD;
	wbinvd();
	write_cr0(cr0);
	wbinvd();
}
void disable_caches(void) __attribute__((weak, alias("x86_disable_caches")));

int x86_init_cache(void)
{
	enable_caches();

	return 0;
}
int init_cache(void) __attribute__((weak, alias("x86_init_cache")));

int do_reset(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
	printf("resetting ...\n");

	/* wait 50 ms */
	udelay(50000);
	disable_interrupts();
	reset_cpu(0);

	/*NOTREACHED*/
	return 0;
}

void flush_cache(unsigned long dummy1, unsigned long dummy2)
{
	asm("wbinvd\n");
}

__weak void reset_cpu(ulong addr)
{
	/* Do a hard reset through the chipset's reset control register */
	outb(SYS_RST | RST_CPU, PORT_RESET);
	for (;;)
		cpu_hlt();
}

void x86_full_reset(void)
{
	outb(FULL_RST | SYS_RST | RST_CPU, PORT_RESET);
}

int dcache_status(void)
{
	return !(read_cr0() & X86_CR0_CD);
}
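
/*
 * x86 DMA is cache-coherent (the chipset snoops CPU caches), so the
 * range-based flush/invalidate hooks can be no-ops here.
 */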
void flush_dcache_range(unsigned long start, unsigned long stop)
{
}

void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void dcache_enable(void)
{
	enable_caches();
}

void dcache_disable(void)
{
	disable_caches();
}

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 1;
}
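
/* Turn on PAE paging: load the page tables, set CR4.PAE, then set CR0.PG */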
void cpu_enable_paging_pae(ulong cr3)
{
	__asm__ __volatile__(
		/* Load the page table address */
		"movl	%0, %%cr3\n"
		/* Enable pae */
		"movl	%%cr4, %%eax\n"
		"orl	$0x00000020, %%eax\n"
		"movl	%%eax, %%cr4\n"
		/* Enable paging */
		"movl	%%cr0, %%eax\n"
		"orl	$0x80000000, %%eax\n"
		"movl	%%eax, %%cr0\n"
		:
		: "r" (cr3)
		: "eax");
}

void cpu_disable_paging_pae(void)
{
	/* Turn off paging, then disable PAE */
	__asm__ __volatile__ (
		/* Disable paging */
		"movl	%%cr0, %%eax\n"
		"andl	$0x7fffffff, %%eax\n"
		"movl	%%eax, %%cr0\n"
		/* Disable pae */
		"movl	%%cr4, %%eax\n"
		"andl	$0xffffffdf, %%eax\n"
		"movl	%%eax, %%cr4\n"
		:
		:
		: "eax");
}

static bool can_detect_long_mode(void)
{
	/* Check that the extended CPUID leaves are available */
	return cpuid_eax(0x80000000) > 0x80000000UL;
}

static bool has_long_mode(void)
{
	/* Long mode (64-bit) support is CPUID.80000001H:EDX bit 29 */
	return cpuid_edx(0x80000001) & (1 << 29) ? true : false;
}

int cpu_has_64bit(void)
{
	return has_cpuid() && can_detect_long_mode() &&
		has_long_mode();
}

const char *cpu_vendor_name(int vendor)
{
	const char *name;

	name = "<invalid cpu vendor>";
	if (vendor < ARRAY_SIZE(x86_vendor_name) &&
	    x86_vendor_name[vendor])
		name = x86_vendor_name[vendor];

	return name;
}

char *cpu_get_name(char *name)
{
	unsigned int *name_as_ints = (unsigned int *)name;
	struct cpuid_result regs;
	char *ptr;
	int i;

	/* The CPU brand string comes from CPUID leaves 0x80000002..4 */
	for (i = 0; i < 3; i++) {
		regs = cpuid(0x80000002 + i);
		name_as_ints[i * 4 + 0] = regs.eax;
		name_as_ints[i * 4 + 1] = regs.ebx;
		name_as_ints[i * 4 + 2] = regs.ecx;
		name_as_ints[i * 4 + 3] = regs.edx;
	}
	name[CPU_MAX_NAME_LEN - 1] = '\0';

	/* Skip leading spaces */
	ptr = name;
	while (*ptr == ' ')
		ptr++;

	return ptr;
}

int default_print_cpuinfo(void)
{
	printf("CPU: %s, vendor %s, device %xh\n",
	       cpu_has_64bit() ? "x86_64" : "x86",
	       cpu_vendor_name(gd->arch.x86_vendor), gd->arch.x86_device);

	return 0;
}

#define PAGETABLE_SIZE		(6 * 4096)
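
/*
 * build_pagetable() - build a flat 4GiB page table structure for 64-bit mode
 *
 * @pgtable: Pointer to a 24KiB block of memory
 */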
static void build_pagetable(uint32_t *pgtable)
{
	uint i;

	memset(pgtable, '\0', PAGETABLE_SIZE);

	/* Level 4 needs a single entry */
	pgtable[0] = (uint32_t)&pgtable[1024] + 7;

	/* Level 3 has one 64-bit entry for each GiB of memory */
	for (i = 0; i < 4; i++) {
		pgtable[1024 + i * 2] = (uint32_t)&pgtable[2048] +
							0x1000 * i + 7;
	}

	/* Level 2 has 2048 64-bit entries, each representing 2MiB */
	for (i = 0; i < 2048; i++)
		pgtable[2048 + i * 2] = 0x183 + (i << 21UL);
}

int cpu_jump_to_64bit(ulong setup_base, ulong target)
{
	uint32_t *pgtable;

	pgtable = memalign(4096, PAGETABLE_SIZE);
	if (!pgtable)
		return -ENOMEM;

	build_pagetable(pgtable);
	cpu_call64((ulong)pgtable, setup_base, target);
	free(pgtable);

	/* cpu_call64() does not return on success */
	return -EFAULT;
}

void show_boot_progress(int val)
{
#if MIN_PORT80_KCLOCKS_DELAY
	/*
	 * Scale the time counter reading to avoid using 64-bit arithmetic.
	 * Can't use get_timer() here because it may not yet be initialized
	 * or even implemented.
	 */
	if (!gd->arch.tsc_prev) {
		gd->arch.tsc_base_kclocks = rdtsc() / 1000;
		gd->arch.tsc_prev = 0;
	} else {
		uint32_t now;

		do {
			now = rdtsc() / 1000 - gd->arch.tsc_base_kclocks;
		} while (now < (gd->arch.tsc_prev + MIN_PORT80_KCLOCKS_DELAY));
		gd->arch.tsc_prev = now;
	}
#endif
	outb(val, POST_PORT);
}
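
/* On non-coreboot builds, write the x86 configuration tables at the last init stage */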
#ifndef CONFIG_SYS_COREBOOT
int last_stage_init(void)
{
	write_tables();

	return 0;
}
#endif

#ifdef CONFIG_SMP
static int enable_smis(struct udevice *cpu, void *unused)
{
	return 0;
}

static struct mp_flight_record mp_steps[] = {
	MP_FR_BLOCK_APS(mp_init_cpu, NULL, mp_init_cpu, NULL),
	/* Wait for APs to finish initialization before proceeding */
	MP_FR_BLOCK_APS(NULL, NULL, enable_smis, NULL),
};

static int x86_mp_init(void)
{
	struct mp_params mp_params;

	mp_params.parallel_microcode_load = 0;
	mp_params.flight_plan = &mp_steps[0];
	mp_params.num_records = ARRAY_SIZE(mp_steps);
	mp_params.microcode_pointer = 0;

	if (mp_init(&mp_params)) {
		printf("Warning: MP init failure\n");
		return -EIO;
	}

	return 0;
}
#endif

__weak int x86_init_cpus(void)
{
#ifdef CONFIG_SMP
	debug("Init additional CPUs\n");
	x86_mp_init();
#else
	struct udevice *dev;

	/*
	 * This causes the cpu-x86 driver to be probed.
	 * We don't check the return value since we want to allow boards
	 * which have not been converted to use the cpu uclass driver to boot.
	 */
	uclass_first_device(UCLASS_CPU, &dev);
#endif

	return 0;
}

int cpu_init_r(void)
{
	if (ll_boot_init())
		return x86_init_cpus();

	return 0;
}