// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Google, Inc
 *
 * Based on code from the coreboot file of the same name
 */

#include <common.h>
#include <cpu.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <qfw.h>
#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/global_data.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/lapic.h>
#include <asm/microcode.h>
#include <asm/mp.h>
#include <asm/msr.h>
#include <asm/mtrr.h>
#include <asm/processor.h>
#include <asm/sipi.h>
#include <dm/device-internal.h>
#include <dm/uclass-internal.h>
#include <dm/lists.h>
#include <dm/root.h>
#include <linux/delay.h>
#include <linux/linkage.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * Setting up multiprocessing
 *
 * The CPU that U-Boot first runs on is the BSP (BootStrap Processor); the
 * others are APs (Application Processors). The BSP copies a 16-bit start-up
 * trampoline to AP_DEFAULT_BASE, fills in the parameters the APs need (GDT,
 * IDT, stacks, saved MSRs and the C entry point) and then wakes the APs
 * with an INIT / SIPI / SIPI sequence on the local APIC.
 *
 * Each AP enters ap_init() and walks the 'flight plan' in lockstep with the
 * BSP. With CONFIG_SMP_AP_WORK enabled, the final flight-plan step leaves
 * each AP spinning on a per-CPU callback slot so that mp_run_on_cpus() can
 * hand it work later; otherwise the APs are halted.
 */
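/* An MSR entry saved on the BSP and restored on each AP during start-up */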
struct __packed saved_msr {
	uint32_t index;
	uint32_t lo;
	uint32_t hi;
};

/*
 * The flight plan: a list of steps ('records') that the BSP and APs execute
 * in lockstep during MP initialisation
 */
struct mp_flight_plan {
	int num_records;
	struct mp_flight_record *records;
};

/*
 * A unit of work for an AP: @func is called with @arg on each CPU matching
 * @logical_cpu_number, which may also be MP_SELECT_ALL or MP_SELECT_APS
 */
struct mp_callback {
	mp_run_func func;
	void *arg;
	int logical_cpu_number;
};

/* Flight plan in use, set up by mp_init() and read by the APs */
static struct mp_flight_plan mp_info;

/*
 * One callback slot per CPU, indexed by dev_seq(cpu): the BSP stores a
 * pointer here to hand work to an AP; the AP runs the callback and stores
 * NULL back to acknowledge it. The BSP's own slot is unused.
 */
static struct mp_callback **ap_callbacks;

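/*
 * One-shot barrier: APs spin in barrier_wait() until the BSP releases them
 * with release_barrier() on the same atomic_t
 */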
static inline void barrier_wait(atomic_t *b)
{
	while (atomic_read(b) == 0)
		asm("pause");
	mfence();
}

static inline void release_barrier(atomic_t *b)
{
	mfence();
	atomic_set(b, 1);
}

static inline void stop_this_cpu(void)
{
	/* Called by an AP when it is ready to halt and wait for a new task */
	for (;;)
		cpu_hlt();
}

/* Returns 1 if timeout (in microseconds) waiting for APs, 0 if target found */
static int wait_for_aps(atomic_t *val, int target, int total_delay,
			int delay_step)
{
	int timeout = 0;
	int delayed = 0;

	while (atomic_read(val) != target) {
		udelay(delay_step);
		delayed += delay_step;
		if (delayed >= total_delay) {
			timeout = 1;
			break;
		}
	}

	return timeout;
}

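/* Called by each AP: check in at each flight record and run its AP call */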
static void ap_do_flight_plan(struct udevice *cpu)
{
	int i;

	for (i = 0; i < mp_info.num_records; i++) {
		struct mp_flight_record *rec = &mp_info.records[i];

		atomic_inc(&rec->cpus_entered);
		barrier_wait(&rec->barrier);

		if (rec->ap_call != NULL)
			rec->ap_call(cpu, rec->ap_arg);
	}
}

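/* Find the CPU device whose APIC ID matches @apic_id; -ENOENT if none */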
static int find_cpu_by_apic_id(int apic_id, struct udevice **devp)
{
	struct udevice *dev;

	*devp = NULL;
	for (uclass_find_first_device(UCLASS_CPU, &dev);
	     dev;
	     uclass_find_next_device(&dev)) {
		struct cpu_plat *plat = dev_get_parent_plat(dev);

		if (plat->cpu_id == apic_id) {
			*devp = dev;
			return 0;
		}
	}

	return -ENOENT;
}

/*
 * By the time APs call ap_init() caching has been set up, and microcode has
 * been loaded
 */
static void ap_init(unsigned int cpu_index)
{
	struct udevice *dev;
	int apic_id;
	int ret;

	/* Ensure the local APIC is enabled */
	enable_lapic();

	apic_id = lapicid();
	ret = find_cpu_by_apic_id(apic_id, &dev);
	if (ret) {
		debug("Unknown CPU apic_id %x\n", apic_id);
		goto done;
	}

	debug("AP: slot %d apic_id %x, dev %s\n", cpu_index, apic_id,
	      dev ? dev->name : "(apic_id not found)");

	/*
	 * Walk the flight plan, checking in at each record and running the
	 * AP call for each step in lockstep with the BSP
	 */
	ap_do_flight_plan(dev);

done:
	stop_this_cpu();
}

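/* Fixed-range MTRRs saved on the BSP and restored on each AP */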
static const unsigned int fixed_mtrrs[NUM_FIXED_MTRRS] = {
	MTRR_FIX_64K_00000_MSR, MTRR_FIX_16K_80000_MSR, MTRR_FIX_16K_A0000_MSR,
	MTRR_FIX_4K_C0000_MSR, MTRR_FIX_4K_C8000_MSR, MTRR_FIX_4K_D0000_MSR,
	MTRR_FIX_4K_D8000_MSR, MTRR_FIX_4K_E0000_MSR, MTRR_FIX_4K_E8000_MSR,
	MTRR_FIX_4K_F0000_MSR, MTRR_FIX_4K_F8000_MSR,
};

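/* Save MSR @index into @entry, returning a pointer to the next entry */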
static inline struct saved_msr *save_msr(int index, struct saved_msr *entry)
{
	msr_t msr;

	msr = msr_read(index);
	entry->index = index;
	entry->lo = msr.lo;
	entry->hi = msr.hi;

	/* Return a pointer to the next entry */
	entry++;
	return entry;
}

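/*
 * Save the BSP's fixed MTRRs, variable MTRRs and MTRR_DEF_TYPE_MSR in the
 * table at @start, for the APs to restore; returns the number of MSRs saved,
 * or -ENOSPC if @size is too small for the table
 */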
static int save_bsp_msrs(char *start, int size)
{
	int msr_count;
	int num_var_mtrrs;
	struct saved_msr *msr_entry;
	int i;
	msr_t msr;

	/* Determine the number of variable MTRRs */
	msr = msr_read(MTRR_CAP_MSR);
	num_var_mtrrs = msr.lo & 0xff;

	/* 2 * num_var_mtrrs for base/mask pairs, +1 for MTRR_DEF_TYPE_MSR */
	msr_count = 2 * num_var_mtrrs + NUM_FIXED_MTRRS + 1;

	if ((msr_count * sizeof(struct saved_msr)) > size) {
		printf("Cannot mirror all %d msrs\n", msr_count);
		return -ENOSPC;
	}

	msr_entry = (void *)start;
	for (i = 0; i < NUM_FIXED_MTRRS; i++)
		msr_entry = save_msr(fixed_mtrrs[i], msr_entry);

	for (i = 0; i < num_var_mtrrs; i++) {
		msr_entry = save_msr(MTRR_PHYS_BASE_MSR(i), msr_entry);
		msr_entry = save_msr(MTRR_PHYS_MASK_MSR(i), msr_entry);
	}

	msr_entry = save_msr(MTRR_DEF_TYPE_MSR, msr_entry);

	return msr_count;
}

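/*
 * Copy the 16-bit start-up trampoline to AP_DEFAULT_BASE and fill in the
 * SIPI parameters (GDT, IDT, AP stacks, microcode, saved MSRs and the C
 * entry point). On success, *@ap_countp points at the check-in counter that
 * the APs will increment.
 */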
static int load_sipi_vector(atomic_t **ap_countp, int num_cpus)
{
	struct sipi_params_16bit *params16;
	struct sipi_params *params;
	static char msr_save[512];
	char *stack;
	ulong addr;
	int code_len;
	int size;
	int ret;

	/* Copy the AP start-up trampoline code to AP_DEFAULT_BASE */
	code_len = ap_start16_code_end - ap_start16;
	debug("Copying SIPI code to %x: %d bytes\n", AP_DEFAULT_BASE,
	      code_len);
	memcpy((void *)AP_DEFAULT_BASE, ap_start16, code_len);

	addr = AP_DEFAULT_BASE + (ulong)sipi_params_16bit - (ulong)ap_start16;
	params16 = (struct sipi_params_16bit *)addr;
	params16->ap_start = (uint32_t)ap_start;
	params16->gdt = (uint32_t)gd->arch.gdt;
	params16->gdt_limit = X86_GDT_SIZE - 1;
	debug("gdt = %x, gdt_limit = %x\n", params16->gdt, params16->gdt_limit);

	params = (struct sipi_params *)sipi_params;
	debug("SIPI 32-bit params at %p\n", params);
	params->idt_ptr = (uint32_t)x86_get_idt();

	/* Allocate one page-aligned stack per CPU */
	params->stack_size = CONFIG_AP_STACK_SIZE;
	size = params->stack_size * num_cpus;
	stack = memalign(4096, size);
	if (!stack)
		return -ENOMEM;
	params->stack_top = (u32)(stack + size);
#if !defined(CONFIG_QEMU) && !defined(CONFIG_HAVE_FSP) && \
	!defined(CONFIG_INTEL_MID)
	params->microcode_ptr = ucode_base;
	debug("Microcode at %x\n", params->microcode_ptr);
#endif
	params->msr_table_ptr = (u32)msr_save;
	ret = save_bsp_msrs(msr_save, sizeof(msr_save));
	if (ret < 0)
		return ret;
	params->msr_count = ret;

	params->c_handler = (uint32_t)&ap_init;

	*ap_countp = &params->ap_count;
	atomic_set(*ap_countp, 0);
	debug("SIPI vector is ready\n");

	return 0;
}

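/* Check that the device tree provides a CPU device for each expected CPU */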
static int check_cpu_devices(int expected_cpus)
{
	int i;

	for (i = 0; i < expected_cpus; i++) {
		struct udevice *dev;
		int ret;

		ret = uclass_find_device(UCLASS_CPU, i, &dev);
		if (ret) {
			debug("Cannot find CPU %d in device tree\n", i);
			return ret;
		}
	}

	return 0;
}

/* Wait up to @total_delay microseconds for the local APIC ICR to go idle */
static int apic_wait_timeout(int total_delay, const char *msg)
{
	int total = 0;

	if (!(lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY))
		return 0;

	debug("Waiting for %s...", msg);
	while (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY) {
		udelay(50);
		total += 50;
		if (total >= total_delay) {
			debug("timed out: aborting\n");
			return -ETIMEDOUT;
		}
	}
	debug("done\n");

	return 0;
}

/**
 * start_aps() - Start up the APs and wait for them to check in
 *
 * This is called on the boot processor to start up all the other processors
 * (here called APs) using the INIT / SIPI / SIPI sequence.
 *
 * @num_aps: Number of APs we expect to find
 * @ap_count: Initially zero; incremented by each AP as it checks in
 * Return: 0 on success, -ENOSPC if the SIPI vector is out of range,
 *	-ETIMEDOUT if the ICR stayed busy or a SIPI did not complete,
 *	-EIO if not all APs checked in
 */
static int start_aps(int num_aps, atomic_t *ap_count)
{
	int sipi_vector;
	/* Max location is 4KiB below 1MiB */
	const int max_vector_loc = ((1 << 20) - (1 << 12)) >> 12;

	if (num_aps == 0)
		return 0;

	/* The vector is sent as a 4k-aligned address in one byte */
	sipi_vector = AP_DEFAULT_BASE >> 12;

	if (sipi_vector > max_vector_loc) {
		printf("SIPI vector too large! 0x%08x\n",
		       sipi_vector);
		return -ENOSPC;
	}

	debug("Attempting to start %d APs\n", num_aps);

	if (apic_wait_timeout(1000, "ICR not to be busy"))
		return -ETIMEDOUT;

	/* Send INIT IPI to all but self */
	lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
		    LAPIC_DM_INIT);
	debug("Waiting for 10ms after sending INIT\n");
	mdelay(10);

	/* Send 1st Startup IPI (SIPI) */
	if (apic_wait_timeout(1000, "ICR not to be busy"))
		return -ETIMEDOUT;

	lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
		    LAPIC_DM_STARTUP | sipi_vector);
	if (apic_wait_timeout(10000, "first SIPI to complete"))
		return -ETIMEDOUT;

	/* Wait for CPUs to check in up to 200us */
	wait_for_aps(ap_count, num_aps, 200, 15);

	/* Send 2nd SIPI */
	if (apic_wait_timeout(1000, "ICR not to be busy"))
		return -ETIMEDOUT;

	lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
		    LAPIC_DM_STARTUP | sipi_vector);
	if (apic_wait_timeout(10000, "second SIPI to complete"))
		return -ETIMEDOUT;

	/* Wait for all CPUs to check in */
	if (wait_for_aps(ap_count, num_aps, 10000, 50)) {
		debug("Not all APs checked in: %d/%d\n",
		      atomic_read(ap_count), num_aps);
		return -EIO;
	}

	return 0;
}

/**
 * bsp_do_flight_plan() - Do the flight plan on the BSP
 *
 * This runs the flight plan on the main CPU used to boot U-Boot.
 *
 * @cpu: Device for the main CPU
 * @plan: Flight plan to run
 * @num_aps: Number of APs (CPUs other than the BSP)
 * Return: 0 on success, -ETIMEDOUT if an AP failed to check in on time
 */
static int bsp_do_flight_plan(struct udevice *cpu, struct mp_flight_plan *plan,
			      int num_aps)
{
	int i;
	int ret = 0;
	const int timeout_us = 100000;
	const int step_us = 100;

	for (i = 0; i < plan->num_records; i++) {
		struct mp_flight_record *rec = &plan->records[i];

		/* Wait for APs if the record is not released */
		if (atomic_read(&rec->barrier) == 0) {
			/* Wait for the APs to check in */
			if (wait_for_aps(&rec->cpus_entered, num_aps,
					 timeout_us, step_us)) {
				debug("MP record %d timeout\n", i);
				ret = -ETIMEDOUT;
			}
		}

		if (rec->bsp_call != NULL)
			rec->bsp_call(cpu, rec->bsp_arg);

		release_barrier(&rec->barrier);
	}

	return ret;
}

/**
 * get_bsp() - Get information about the bootstrap processor
 *
 * @devp: If non-NULL, returns the CPU device for the BSP
 * @cpu_countp: If non-NULL, returns the total number of CPUs
 * Return: CPU number of the BSP (0 if its sequence number is not known), or
 *	-ve on error
 */
static int get_bsp(struct udevice **devp, int *cpu_countp)
{
	char processor_name[CPU_MAX_NAME_LEN];
	struct udevice *dev;
	int apic_id;
	int ret;

	cpu_get_name(processor_name);
	debug("CPU: %s\n", processor_name);

	apic_id = lapicid();
	ret = find_cpu_by_apic_id(apic_id, &dev);
	if (ret < 0) {
		printf("Cannot find boot CPU, APIC ID %d\n", apic_id);
		return ret;
	}
	ret = cpu_get_count(dev);
	if (ret < 0)
		return log_msg_ret("count", ret);
	if (devp)
		*devp = dev;
	if (cpu_countp)
		*cpu_countp = ret;

	return dev_seq(dev) >= 0 ? dev_seq(dev) : 0;
}

/**
 * read_callback() - Read the pointer in a callback slot
 *
 * This is called by APs to read their callback slot to see if there is a
 * pointer to new instructions
 *
 * @slot: Pointer to the AP's callback slot
 * Return: value of that pointer
 */
static struct mp_callback *read_callback(struct mp_callback **slot)
{
	dmb();

	return *slot;
}

/**
 * store_callback() - Store a pointer in a callback slot
 *
 * This is called by the BSP to post work to an AP, and by each AP to store
 * NULL once it has finished the work requested by the BSP.
 *
 * @slot: Pointer to the AP's callback slot
 * @val: Value to write to the slot (e.g. NULL)
 */
static void store_callback(struct mp_callback **slot, struct mp_callback *val)
{
	*slot = val;
	dmb();
}

/**
 * run_ap_work() - Run a callback on selected APs
 *
 * This writes @callback to the slot of every AP and waits for them all to
 * acknowledge it (by clearing their slot), or for @expire_ms to elapse.
 * Whether each AP actually calls the function depends on the callback's
 * logical_cpu_number (see struct mp_callback).
 *
 * @callback: Callback information to pass to all APs
 * @bsp: CPU device for the BSP
 * @num_cpus: The number of CPUs in the system (= number of APs + 1)
 * @expire_ms: Timeout in milliseconds, or 0 to wait forever
 * Return: 0 if OK, -ETIMEDOUT if one or more APs failed to respond in time,
 *	-ENOTSUPP if CONFIG_SMP_AP_WORK is not enabled
 */
static int run_ap_work(struct mp_callback *callback, struct udevice *bsp,
		       int num_cpus, uint expire_ms)
{
	int cur_cpu = dev_seq(bsp);
	int num_aps = num_cpus - 1;	/* number of non-BSP CPUs */
	int cpus_accepted;
	ulong start;
	int i;

	if (!IS_ENABLED(CONFIG_SMP_AP_WORK)) {
		printf("APs already parked. CONFIG_SMP_AP_WORK not enabled\n");
		return -ENOTSUPP;
	}

	/* Signal to all the APs to run the func */
	for (i = 0; i < num_cpus; i++) {
		if (cur_cpu != i)
			store_callback(&ap_callbacks[i], callback);
	}
	mfence();

	/* Wait for all the APs to signal back that the call was accepted */
	start = get_timer(0);

	do {
		mdelay(1);
		cpus_accepted = 0;

		for (i = 0; i < num_cpus; i++) {
			if (cur_cpu == i)
				continue;
			if (!read_callback(&ap_callbacks[i]))
				cpus_accepted++;
		}

		if (expire_ms && get_timer(start) >= expire_ms) {
			log(UCLASS_CPU, LOGL_CRIT,
			    "AP call expired; %d/%d CPUs accepted\n",
			    cpus_accepted, num_aps);
			return -ETIMEDOUT;
		}
	} while (cpus_accepted != num_aps);

	/* Make sure we can see any data written by the APs */
	mfence();

	return 0;
}

/**
 * ap_wait_for_instruction() - Wait for and process requests from the BSP
 *
 * This is called by the APs (everything other than the main boot CPU) to
 * await instructions. Work arrives as a function and argument in the AP's
 * callback slot; the AP runs it (if selected by logical_cpu_number) and
 * clears the slot to acknowledge.
 *
 * @cpu: CPU that is waiting
 * @unused: Optional argument provided by struct mp_flight_record
 * Return: Does not return
 */
static int ap_wait_for_instruction(struct udevice *cpu, void *unused)
{
	struct mp_callback lcb;
	struct mp_callback **per_cpu_slot;

	if (!IS_ENABLED(CONFIG_SMP_AP_WORK))
		return 0;

	per_cpu_slot = &ap_callbacks[dev_seq(cpu)];

	while (1) {
		struct mp_callback *cb = read_callback(per_cpu_slot);

		if (!cb) {
			asm ("pause");
			continue;
		}

		/* Copy to a local variable before using the value */
		memcpy(&lcb, cb, sizeof(lcb));
		mfence();
		if (lcb.logical_cpu_number == MP_SELECT_ALL ||
		    lcb.logical_cpu_number == MP_SELECT_APS ||
		    dev_seq(cpu) == lcb.logical_cpu_number)
			lcb.func(lcb.arg);

		/* Indicate that we are finished */
		store_callback(per_cpu_slot, NULL);
	}

	return 0;
}

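/* Set up platform info for a CPU (microcode revision, device ID) and probe it */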
static int mp_init_cpu(struct udevice *cpu, void *unused)
{
	struct cpu_plat *plat = dev_get_parent_plat(cpu);

	plat->ucode_version = microcode_read_rev();
	plat->device_id = gd->arch.x86_device;

	return device_probe(cpu);
}

static struct mp_flight_record mp_steps[] = {
	MP_FR_BLOCK_APS(mp_init_cpu, NULL, mp_init_cpu, NULL),
	MP_FR_BLOCK_APS(ap_wait_for_instruction, NULL, NULL, NULL),
};

int mp_run_on_cpus(int cpu_select, mp_run_func func, void *arg)
{
	struct mp_callback lcb = {
		.func = func,
		.arg = arg,
		.logical_cpu_number = cpu_select,
	};
	struct udevice *dev;
	int num_cpus;
	int ret;

	ret = get_bsp(&dev, &num_cpus);
	if (ret < 0)
		return log_msg_ret("bsp", ret);
	if (cpu_select == MP_SELECT_ALL || cpu_select == MP_SELECT_BSP ||
	    cpu_select == ret) {
		/* Run on the BSP first */
		func(arg);
	}

	if (!IS_ENABLED(CONFIG_SMP_AP_WORK) ||
	    !(gd->flags & GD_FLG_SMP_READY)) {
		/* Allow use of this function on the BSP only */
		if (cpu_select == MP_SELECT_BSP || !cpu_select)
			return 0;
		return -ENOTSUPP;
	}

	/* Allow up to 1 second for all APs to finish */
	ret = run_ap_work(&lcb, dev, num_cpus, 1000 /* ms */);
	if (ret)
		return log_msg_ret("aps", ret);

	return 0;
}

static void park_this_cpu(void *unused)
{
	stop_this_cpu();
}

int mp_park_aps(void)
{
	int ret;

	ret = mp_run_on_cpus(MP_SELECT_APS, park_this_cpu, NULL);
	if (ret)
		return log_ret(ret);

	return 0;
}

int mp_first_cpu(int cpu_select)
{
	struct udevice *dev;
	int num_cpus;
	int ret;

	/*
	 * This assumes that CPUs are numbered from 0. This function tries to
	 * avoid assuming that CPU 0 is the boot CPU
	 */
	if (cpu_select == MP_SELECT_ALL)
		return 0;	/* start with the first one */

	ret = get_bsp(&dev, &num_cpus);
	if (ret < 0)
		return log_msg_ret("bsp", ret);

	/* Return the boot CPU if requested */
	if (cpu_select == MP_SELECT_BSP)
		return ret;

	/* Return something other than the boot CPU, if APs requested */
	if (cpu_select == MP_SELECT_APS && num_cpus > 1)
		return ret == 0 ? 1 : 0;

	/* Check for an invalid value */
	if (cpu_select < 0 || cpu_select >= num_cpus)
		return -EINVAL;

	return cpu_select;
}

int mp_next_cpu(int cpu_select, int prev_cpu)
{
	struct udevice *dev;
	int num_cpus;
	int ret;
	int bsp;

	/* If we selected the BSP or a particular CPU, we are done */
	if (!IS_ENABLED(CONFIG_SMP_AP_WORK) || cpu_select == MP_SELECT_BSP ||
	    cpu_select >= 0)
		return -EFBIG;

	/* Must be doing MP_SELECT_ALL or MP_SELECT_APS now */
	ret = get_bsp(&dev, &num_cpus);
	if (ret < 0)
		return log_msg_ret("bsp", ret);
	bsp = ret;

	/* Move to the next CPU */
	assert(prev_cpu >= 0);
	ret = prev_cpu + 1;

	/* Skip the BSP if we are only looking for APs */
	if (cpu_select == MP_SELECT_APS && ret == bsp)
		ret++;
	if (ret >= num_cpus)
		return -EFBIG;

	return ret;
}

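/*
 * Set up the flight plan, load the SIPI vector, start the APs and run the
 * flight plan on the BSP, marking SMP as ready on success
 */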
int mp_init(void)
{
	int num_aps, num_cpus;
	atomic_t *ap_count;
	struct udevice *cpu;
	int ret;

	if (IS_ENABLED(CONFIG_QFW)) {
		ret = qemu_cpu_fixup();
		if (ret)
			return ret;
	}

	ret = get_bsp(&cpu, &num_cpus);
	if (ret < 0) {
		debug("Cannot init boot CPU: err=%d\n", ret);
		return ret;
	}

	if (num_cpus < 2)
		debug("Warning: Only 1 CPU is detected\n");

	ret = check_cpu_devices(num_cpus);
	if (ret)
		log_warning("Warning: Device tree does not describe all CPUs. Extra ones will not be started correctly\n");

	ap_callbacks = calloc(num_cpus, sizeof(struct mp_callback *));
	if (!ap_callbacks)
		return -ENOMEM;

	/* Copy the needed parameters so that the APs have a reference */
	mp_info.num_records = ARRAY_SIZE(mp_steps);
	mp_info.records = mp_steps;

	/* Load the SIPI vector */
	ret = load_sipi_vector(&ap_count, num_cpus);
	if (ret)
		return ret;
	if (!ap_count)
		return -ENOENT;

	/*
	 * Make sure the SIPI data hits RAM so the APs that come up will see
	 * the start-up code even if the caches are disabled
	 */
	wbinvd();

	/* Start the APs, providing the number of APs and the check-in count */
	num_aps = num_cpus - 1;
	ret = start_aps(num_aps, ap_count);
	if (ret) {
		mdelay(1000);
		debug("%d/%d eventually checked in?\n", atomic_read(ap_count),
		      num_aps);
		return ret;
	}

	/* Walk the flight plan for the BSP */
	ret = bsp_do_flight_plan(cpu, &mp_info, num_aps);
	if (ret) {
		debug("CPU init failed: err=%d\n", ret);
		return ret;
	}
	gd->flags |= GD_FLG_SMP_READY;

	return 0;
}