1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#include <linux/interrupt.h>
17#include <linux/spinlock.h>
18#include <linux/init.h>
19#include <linux/jiffies.h>
20#include <linux/cpumask.h>
21#include <linux/err.h>
22#include <linux/kernel.h>
23#include <linux/delay.h>
24#include <linux/sched.h>
25#include <linux/profile.h>
26#include <linux/smp.h>
27#include <linux/cpu.h>
28#include <asm/tlbflush.h>
29#include <asm/bitops.h>
30#include <asm/processor.h>
31#include <asm/bug.h>
32#include <asm/exceptions.h>
33#include <asm/hardirq.h>
34#include <asm/fpu.h>
35#include <asm/mmu_context.h>
36#include <asm/thread_info.h>
37#include <asm/cpu-regs.h>
38#include <asm/intctl-regs.h>
39#include "internal.h"
40
#ifdef CONFIG_HOTPLUG_CPU
#include <asm/cacheflush.h>

/* non-zero while the corresponding CPU is parked in sleep_cpu() */
static unsigned long sleep_mode[NR_CPUS];

static void run_sleep_cpu(unsigned int cpu);
static void run_wakeup_cpu(unsigned int cpu);
#endif
49
50
51
52
53
/* define DEBUG_SMP to get boot-time IPI debugging output */
#undef DEBUG_SMP
#ifdef DEBUG_SMP
#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
/* no_printk() keeps the format arguments type-checked but emits nothing */
#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#endif

/* timeout value in msec for smp_nmi_call_function; 0 means spin forever */
#define CALL_FUNCTION_NMI_IPI_TIMEOUT 0
63
64
65
66
/*
 * Bookkeeping for an in-flight NMI cross-call.  Cache-line aligned (and
 * padded out to a full line by size_alignment) so the bitmasks polled by
 * other CPUs do not share a line with unrelated data.
 */
struct nmi_call_data_struct {
	smp_call_func_t	func;		/* function to call on each target CPU */
	void		*info;		/* opaque argument handed to func */
	cpumask_t	started;	/* CPUs that have yet to pick up the call */
	cpumask_t	finished;	/* CPUs that have yet to complete it (wait mode) */
	int		wait;		/* non-zero: initiator waits for completion */
	char size_alignment[0]
	__attribute__ ((__aligned__(SMP_CACHE_BYTES)));
} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
76
static DEFINE_SPINLOCK(smp_nmi_call_lock);	/* serialises NMI cross-calls */
static struct nmi_call_data_struct *nmi_call_data; /* current NMI call, if any */

/* bring-up handshake bitmaps between the boot CPU and the secondaries */
static cpumask_t cpu_callin_map;	/* secondaries that have checked in */
static cpumask_t cpu_callout_map;	/* secondaries we have called out to */
cpumask_t cpu_boot_map;			/* secondaries that have been booted */
unsigned long start_stack[NR_CPUS - 1];	/* initial SP for each secondary CPU */

/* per-CPU information, indexed by logical CPU number */
struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned;

static int cpucount;			/* number of secondary CPUs brought up */
static cpumask_t smp_commenced_mask;	/* secondaries released to run */
cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;

/* function prototypes */
static int do_boot_cpu(int);
static void smp_show_cpu_info(int cpu_id);
static void smp_callin(void);
static void smp_online(void);
static void smp_store_cpu_info(int);
static void smp_cpu_init(void);
static void smp_tune_scheduling(void);
static void send_IPI_mask(const cpumask_t *cpumask, int irq);
static void init_ipi(void);
109
110
111
112
/* IPI interrupt-controller plumbing */
static void mn10300_ipi_disable(unsigned int irq);
static void mn10300_ipi_enable(unsigned int irq);
static void mn10300_ipi_chip_disable(struct irq_data *d);
static void mn10300_ipi_chip_enable(struct irq_data *d);
static void mn10300_ipi_ack(struct irq_data *d);
static void mn10300_ipi_nop(struct irq_data *d);

/* irq_chip shared by all IPI vectors; eoi is a no-op since ack clears
 * the detect bit */
static struct irq_chip mn10300_ipi_type = {
	.name		= "cpu_ipi",
	.irq_disable	= mn10300_ipi_chip_disable,
	.irq_enable	= mn10300_ipi_chip_enable,
	.irq_ack	= mn10300_ipi_ack,
	.irq_eoi	= mn10300_ipi_nop
};

static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);

static struct irqaction reschedule_ipi = {
	.handler	= smp_reschedule_interrupt,
	.flags		= IRQF_NOBALANCING,
	.name		= "smp reschedule IPI"
};
static struct irqaction call_function_ipi = {
	.handler	= smp_call_function_interrupt,
	.flags		= IRQF_NOBALANCING,
	.name		= "smp call function IPI"
};

/* the local-timer IPI is only needed when there is no per-CPU clockevent */
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
static struct irqaction local_timer_ipi = {
	.handler	= smp_ipi_timer_interrupt,
	.flags		= IRQF_DISABLED | IRQF_NOBALANCING,
	.name		= "smp local timer IPI"
};
#endif
150
151
152
153
/*
 * init_ipi - initialise the interrupt vectors used for inter-processor
 * signalling
 *
 * Installs handlers and unmasks the reschedule, call-function and (where
 * needed) local-timer IPIs, then wires up the cache-flush, NMI
 * call-function, SMP boot and kernel-debugger vectors.
 */
static void init_ipi(void)
{
	unsigned long flags;
	u16 tmp16;

	/* set up the reschedule IPI */
	irq_set_chip_and_handler(RESCHEDULE_IPI, &mn10300_ipi_type,
				 handle_percpu_irq);
	setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
	set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
	mn10300_ipi_enable(RESCHEDULE_IPI);

	/* set up the call-function IPI */
	irq_set_chip_and_handler(CALL_FUNC_SINGLE_IPI, &mn10300_ipi_type,
				 handle_percpu_irq);
	setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
	set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
	mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);

	/* set up the local timer IPI */
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
	irq_set_chip_and_handler(LOCAL_TIMER_IPI, &mn10300_ipi_type,
				 handle_percpu_irq);
	setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
	set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
	mn10300_ipi_enable(LOCAL_TIMER_IPI);
#endif

#ifdef CONFIG_MN10300_CACHE_ENABLED
	/* set up the cache-flush IPI; handled entirely by a low-level
	 * assembly stub, so it bypasses the generic IRQ layer */
	irq_set_chip(FLUSH_CACHE_IPI, &mn10300_ipi_type);
	flags = arch_local_cli_save();
	__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
			mn10300_low_ipi_handler);
	GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(FLUSH_CACHE_IPI);
	arch_local_irq_restore(flags);
#endif

	/* set up the NMI call-function IPI; the read-back of GxICR flushes
	 * the write to the interrupt controller */
	irq_set_chip(CALL_FUNCTION_NMI_IPI, &mn10300_ipi_type);
	flags = arch_local_cli_save();
	GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
	tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
	arch_local_irq_restore(flags);

	/* set up the SMP boot IPI's low-level handler */
	flags = arch_local_cli_save();
	__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
			mn10300_low_ipi_handler);
	arch_local_irq_restore(flags);

#ifdef CONFIG_KERNEL_DEBUGGER
	irq_set_chip(DEBUGGER_NMI_IPI, &mn10300_ipi_type);
#endif
}
211
212
213
214
215
/*
 * mn10300_ipi_shutdown - shut down an IPI vector
 * @irq: the vector to shut down
 *
 * Clears the enable and request bits, keeping only the level and detect
 * bits.  The trailing read of GxICR flushes the write to the interrupt
 * controller.
 */
static void mn10300_ipi_shutdown(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
	tmp = GxICR(irq);

	arch_local_irq_restore(flags);
}
229
230
231
232
233
/*
 * mn10300_ipi_enable - enable an IPI vector on this CPU
 * @irq: the vector to enable
 *
 * Sets the enable bit while preserving the configured level; the
 * read-back of GxICR flushes the write to the interrupt controller.
 */
static void mn10300_ipi_enable(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
	tmp = GxICR(irq);

	arch_local_irq_restore(flags);
}

/* irq_chip callback wrapping mn10300_ipi_enable() */
static void mn10300_ipi_chip_enable(struct irq_data *d)
{
	mn10300_ipi_enable(d->irq);
}
252
253
254
255
256
/*
 * mn10300_ipi_disable - disable an IPI vector on this CPU
 * @irq: the vector to disable
 *
 * Clears everything but the level bits; the read-back of GxICR flushes
 * the write to the interrupt controller.
 */
static void mn10300_ipi_disable(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	tmp = GxICR(irq);
	GxICR(irq) = tmp & GxICR_LEVEL;
	tmp = GxICR(irq);

	arch_local_irq_restore(flags);
}

/* irq_chip callback wrapping mn10300_ipi_disable() */
static void mn10300_ipi_chip_disable(struct irq_data *d)
{
	mn10300_ipi_disable(d->irq);
}
275
276
277
278
279
280
281
282
283
/*
 * mn10300_ipi_ack - acknowledge an IPI
 * @d: irq_data for the vector being acknowledged
 *
 * Clears the interrupt's detect bit with a byte-wide write (which avoids
 * disturbing the enable/level bits); the following 16-bit read flushes
 * the write to the interrupt controller.
 */
static void mn10300_ipi_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	GxICR_u8(irq) = GxICR_DETECT;
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}

/* no-op irq_chip callback (used for irq_eoi) */
static void mn10300_ipi_nop(struct irq_data *d)
{
}
303
304
305
306
307
308
309
310
311
312
313static void send_IPI_mask(const cpumask_t *cpumask, int irq)
314{
315 int i;
316 u16 tmp;
317
318 for (i = 0; i < NR_CPUS; i++) {
319 if (cpumask_test_cpu(i, cpumask)) {
320
321 tmp = CROSS_GxICR(irq, i);
322 CROSS_GxICR(irq, i) =
323 tmp | GxICR_REQUEST | GxICR_DETECT;
324 tmp = CROSS_GxICR(irq, i);
325 }
326 }
327}
328
329
330
331
332
333
334
/*
 * send_IPI_self - raise an IPI on the calling CPU
 * @irq: the IPI vector to raise
 */
void send_IPI_self(int irq)
{
	send_IPI_mask(cpumask_of(smp_processor_id()), irq);
}

/*
 * send_IPI_allbutself - raise an IPI on every online CPU except the
 * calling one
 * @irq: the IPI vector to raise
 */
void send_IPI_allbutself(int irq)
{
	cpumask_t cpumask;

	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);
	send_IPI_mask(&cpumask, irq);
}

/* multi-target call-function IPIs are not supported on this arch; only
 * the single-target path below is implemented */
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	BUG();
	/*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/
}

/* kick one CPU to run its queued single-function call */
void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
}

/*
 * smp_send_reschedule - ask another CPU to reschedule
 * @cpu: the target CPU
 */
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI);
}
376
377
378
379
380
381
382
383
384
385
386
387
388
389
/*
 * smp_nmi_call_function - run a function on all other online CPUs via NMI
 * @func: the function to run; must be fast and non-blocking
 * @info: opaque argument passed to @func
 * @wait: if true, wait until @func has finished on every target CPU
 *
 * Returns 0 on success or -ETIMEDOUT if a timeout is configured and the
 * targets did not respond in time.  Note that with the current
 * CALL_FUNCTION_NMI_IPI_TIMEOUT of 0 the timeout branch is compiled out
 * and this spins indefinitely waiting for the targets.
 */
int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
{
	struct nmi_call_data_struct data;
	unsigned long flags;
	unsigned int cnt;
	int cpus, ret = 0;

	cpus = num_online_cpus() - 1;
	if (cpus < 1)
		return 0;	/* no other CPUs to call */

	data.func = func;
	data.info = info;
	cpumask_copy(&data.started, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &data.started);
	data.wait = wait;
	if (wait)
		data.finished = data.started;

	spin_lock_irqsave(&smp_nmi_call_lock, flags);
	nmi_call_data = &data;
	smp_mb();	/* publish nmi_call_data before raising the NMI */

	/* start the NMI call on all the other CPUs */
	send_IPI_allbutself(CALL_FUNCTION_NMI_IPI);

	/* wait for the remote CPUs to pick up (and optionally finish) the
	 * call; they clear themselves from the masks in the NMI handler */
	if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
		for (cnt = 0;
		     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
			     !cpumask_empty(&data.started);
		     cnt++)
			mdelay(1);

		if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
			for (cnt = 0;
			     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
				     !cpumask_empty(&data.finished);
			     cnt++)
				mdelay(1);
		}

		if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT)
			ret = -ETIMEDOUT;

	} else {
		/* no timeout configured: spin until every target has
		 * responded */
		while (!cpumask_empty(&data.started))
			barrier();
		if (wait)
			while (!cpumask_empty(&data.finished))
				barrier();
	}

	spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
	return ret;
}
448
449
450
451
452
453
454
455
456
457
/*
 * smp_jump_to_debugger - drag the other CPUs into the kernel debugger
 *
 * Raises the debugger NMI on every other online CPU so they stop inside
 * the debugger alongside this one.
 */
void smp_jump_to_debugger(void)
{
	if (num_online_cpus() > 1)
		/* should only be called on the boot CPU with others running */
		send_IPI_allbutself(DEBUGGER_NMI_IPI);
}

/*
 * stop_this_cpu - NMI callback used by smp_send_stop()
 * @unused: unused
 *
 * Marks the CPU offline and spins with interrupts disabled.  stopflag
 * is never set anywhere, so in practice this parks the CPU until reset;
 * the code after the loop only exists for completeness.
 */
void stop_this_cpu(void *unused)
{
	static volatile int stopflag;
	unsigned long flags;

#ifdef CONFIG_GDBSTUB
	/* In case of single stepping spurious interrupts may occur on other
	 * CPUs; make sure gdbstub does not believe this CPU is still being
	 * debugged. */
	atomic_set(&procindebug[smp_processor_id()], 0);
#endif

	flags = arch_local_cli_save();
	set_cpu_online(smp_processor_id(), false);

	while (!stopflag)
		cpu_relax();

	set_cpu_online(smp_processor_id(), true);
	arch_local_irq_restore(flags);
}

/*
 * smp_send_stop - stop all the other CPUs (e.g. for panic/reboot)
 */
void smp_send_stop(void)
{
	smp_nmi_call_function(stop_this_cpu, NULL, 0);
}
498
499
500
501
502
503
504
505
/*
 * smp_reschedule_interrupt - reschedule IPI handler
 * @irq: the vector number
 * @dev_id: unused
 *
 * Hands off to the scheduler; the actual reschedule happens on the way
 * out of the interrupt.
 */
static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

/*
 * smp_call_function_interrupt - call-function IPI handler
 * @irq: the vector number
 * @dev_id: unused
 *
 * Runs the function queued for this CPU by
 * arch_send_call_function_single_ipi().
 */
static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
{
	/* generic_smp_call_function_interrupt(); */
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}

/*
 * smp_nmi_call_function_interrupt - NMI-level call-function handler
 *
 * Reads the pending call from nmi_call_data (published by
 * smp_nmi_call_function() or hotplug_cpu_nmi_call_function()), clears
 * this CPU from the started mask, runs the function, and if the caller
 * is waiting clears this CPU from the finished mask afterwards.
 */
void smp_nmi_call_function_interrupt(void)
{
	smp_call_func_t func = nmi_call_data->func;
	void *info = nmi_call_data->info;
	int wait = nmi_call_data->wait;

	/* notify the initiating CPU that we've picked up the call before
	 * actually executing it */
	smp_mb();
	cpumask_clear_cpu(smp_processor_id(), &nmi_call_data->started);
	(*func)(info);

	if (wait) {
		smp_mb();
		cpumask_clear_cpu(smp_processor_id(),
				  &nmi_call_data->finished);
	}
}
548
549#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
550 defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
551
552
553
554
555
556
557
/*
 * smp_ipi_timer_interrupt - local-timer IPI handler
 * @irq: the vector number
 * @dev_id: unused
 *
 * Forwards the broadcast tick to this CPU's local timer handling.
 */
static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id)
{
	return local_timer_interrupt();
}
562#endif
563
564void __init smp_init_cpus(void)
565{
566 int i;
567 for (i = 0; i < NR_CPUS; i++) {
568 set_cpu_possible(i, true);
569 set_cpu_present(i, true);
570 }
571}
572
573
574
575
576
577
578
/*
 * smp_cpu_init - per-CPU initialisation run early on a secondary CPU
 *
 * Adopts init_mm as the active mm, clears FPU ownership, programs and
 * unmasks this CPU's IPI vectors, shuts down the boot IPI and enables
 * the NMI call-function vector.
 */
static void __init smp_cpu_init(void)
{
	unsigned long flags;
	int cpu_id = smp_processor_id();
	u16 tmp16;

	if (test_and_set_bit(cpu_id, &cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
		for (;;)
			local_irq_enable();	/* park: double init is fatal */
	}
	printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);	/* idle task must not own a user mm */

	enter_lazy_tlb(&init_mm, current);

	/* force the FPU state to be reloaded on first use */
	clear_using_fpu(current);

	GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);

	GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(LOCAL_TIMER_IPI);

	GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(RESCHEDULE_IPI);

#ifdef CONFIG_MN10300_CACHE_ENABLED
	GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
	mn10300_ipi_enable(FLUSH_CACHE_IPI);
#endif

	mn10300_ipi_shutdown(SMP_BOOT_IRQ);

	/* enable the NMI call-function vector; the read-back flushes the
	 * write to the interrupt controller */
	flags = arch_local_cli_save();
	GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
	tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
	arch_local_irq_restore(flags);
}
623
624
625
626
627
628
/*
 * smp_prepare_cpu_init - hardware preparation common to all CPUs
 *
 * Points the interrupt vector registers at the generic IRQ entry stubs,
 * masks every external interrupt at the lowest priority, and (if the
 * kernel debugger is configured) enables its NMI vector.
 */
void smp_prepare_cpu_init(void)
{
	int loop;

	/* set the interrupt vector registers */
	IVAR0 = EXCEP_IRQ_LEVEL0;
	IVAR1 = EXCEP_IRQ_LEVEL1;
	IVAR2 = EXCEP_IRQ_LEVEL2;
	IVAR3 = EXCEP_IRQ_LEVEL3;
	IVAR4 = EXCEP_IRQ_LEVEL4;
	IVAR5 = EXCEP_IRQ_LEVEL5;
	IVAR6 = EXCEP_IRQ_LEVEL6;

	/* disable all interrupts and clear any pending detect bits */
	for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
		GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;

#ifdef CONFIG_KERNEL_DEBUGGER
	/* initialise the kernel debugger NMI; the read-back flushes the
	 * write to the interrupt controller */
	do {
		unsigned long flags;
		u16 tmp16;

		flags = arch_local_cli_save();
		GxICR(DEBUGGER_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
		tmp16 = GxICR(DEBUGGER_NMI_IPI);
		arch_local_irq_restore(flags);
	} while (0);
#endif
}
659
660
661
662
663
/*
 * start_secondary - C entry point for a secondary CPU
 * @unused: unused
 *
 * Initialises the CPU, checks in with the boot CPU, waits until released
 * via smp_commenced_mask, marks itself online and drops into the idle
 * loop.  Never returns in practice.
 */
int __init start_secondary(void *unused)
{
	smp_cpu_init();
	smp_callin();
	while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
		cpu_relax();	/* wait for the boot CPU's go-ahead */

	local_flush_tlb();
	preempt_disable();
	smp_online();

#ifdef CONFIG_GENERIC_CLOCKEVENTS
	init_clockevents();
#endif
	cpu_idle();
	return 0;
}
681
682
683
684
685
686
687
/*
 * smp_prepare_cpus - boot the secondary CPUs
 * @max_cpus: upper bound on the number of CPUs to bring up (0 disables SMP)
 *
 * Records the boot CPU's info, sets up the IPI vectors, then attempts to
 * boot each other physical CPU up to the requested limit.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int phy_id;

	/* set up the boot CPU first */
	smp_store_cpu_info(0);
	smp_tune_scheduling();

	init_ipi();

	/* SMP explicitly disabled on the command line? */
	if (max_cpus == 0) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		goto smp_done;
	}

	/* boot the remaining physical CPUs */
	for (phy_id = 0; phy_id < NR_CPUS; phy_id++) {
		/* stop once the requested CPU count is reached */
		if (max_cpus <= cpucount + 1)
			continue;
		if (phy_id != 0)
			do_boot_cpu(phy_id);
		set_cpu_possible(phy_id, true);
		smp_show_cpu_info(phy_id);
	}

smp_done:
	Dprintk("Boot done.\n");
}
718
719
720
721
722
723
724
725static void __init smp_store_cpu_info(int cpu)
726{
727 struct mn10300_cpuinfo *ci = &cpu_data[cpu];
728
729 *ci = boot_cpu_data;
730 ci->loops_per_jiffy = loops_per_jiffy;
731 ci->type = CPUREV;
732}
733
734
735
736
737
738
739static void __init smp_tune_scheduling(void)
740{
741}
742
743
744
745
746
747
748
749
/*
 * do_boot_cpu - boot one secondary CPU
 * @phy_id: physical id of the CPU to boot (also used as its logical id)
 *
 * Forks an idle task for the CPU, raises the boot IPI at it, then waits
 * first for the IPI to be consumed and then for the CPU to check in via
 * cpu_callin_map.  Returns 0 on success, 1 on failure (after undoing the
 * bookkeeping).
 */
static int __init do_boot_cpu(int phy_id)
{
	struct task_struct *idle;
	unsigned long send_status, callin_status;
	int timeout, cpu_id;

	send_status = GxICR_REQUEST;
	callin_status = 0;
	timeout = 0;
	cpu_id = phy_id;

	cpucount++;

	/* create an idle thread for this CPU */
	idle = fork_idle(cpu_id);
	if (IS_ERR(idle))
		panic("Failed fork for CPU#%d.", cpu_id);

	idle->thread.pc = (unsigned long)start_secondary;

	printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
	start_stack[cpu_id - 1] = idle->thread.sp;

	task_thread_info(idle)->cpu = cpu_id;

	/* send the boot IPI to the target CPU */
	send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);

	Dprintk("Waiting for send to finish...\n");

	/* wait for the target to consume the IPI (request bit clears) */
	do {
		udelay(1000);
		send_status =
			CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
	} while (send_status == GxICR_REQUEST && timeout++ < 100);

	Dprintk("Waiting for cpu_callin_map.\n");

	if (send_status == 0) {
		/* allow the target to proceed, then wait up to ~5s for it
		 * to check in */
		cpumask_set_cpu(cpu_id, &cpu_callout_map);

		timeout = 0;
		do {
			udelay(1000);
			callin_status = cpumask_test_cpu(cpu_id,
							 &cpu_callin_map);
		} while (callin_status == 0 && timeout++ < 5000);

		if (callin_status == 0)
			Dprintk("Not responding.\n");
	} else {
		printk(KERN_WARNING "IPI not delivered.\n");
	}

	if (send_status == GxICR_REQUEST || callin_status == 0) {
		/* boot failed: roll back the handshake state */
		cpumask_clear_cpu(cpu_id, &cpu_callout_map);
		cpumask_clear_cpu(cpu_id, &cpu_callin_map);
		cpumask_clear_cpu(cpu_id, &cpu_initialized);
		cpucount--;
		return 1;
	}
	return 0;
}
816
817
818
819
820
/*
 * smp_show_cpu_info - print clock speed and BogoMIPS for a CPU
 * @cpu: the CPU whose details are to be printed
 */
static void __init smp_show_cpu_info(int cpu)
{
	struct mn10300_cpuinfo *ci = &cpu_data[cpu];

	printk(KERN_INFO
	       "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
	       cpu,
	       MN10300_IOCLK / 1000000,
	       (MN10300_IOCLK / 10000) % 100,
	       ci->loops_per_jiffy / (500000 / HZ),
	       (ci->loops_per_jiffy / (5000 / HZ)) % 100);
}

/*
 * smp_callin - check in with the boot CPU from a freshly started secondary
 *
 * Waits up to two seconds for the boot CPU to call out to us, calibrates
 * the delay loop, records our CPU info and finally marks us checked in
 * via cpu_callin_map.  BUGs if the callout never arrives or if we were
 * somehow already checked in.
 */
static void __init smp_callin(void)
{
	unsigned long timeout;
	int cpu;

	cpu = smp_processor_id();
	timeout = jiffies + (2 * HZ);

	if (cpumask_test_cpu(cpu, &cpu_callin_map)) {
		printk(KERN_ERR "CPU#%d already present.\n", cpu);
		BUG();
	}
	Dprintk("CPU#%d waiting for CALLOUT\n", cpu);

	/* wait for the boot CPU to call out to us */
	while (time_before(jiffies, timeout)) {
		if (cpumask_test_cpu(cpu, &cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		printk(KERN_ERR
		       "BUG: CPU#%d started up but did not get a callout!\n",
		       cpu);
		BUG();
	}

#ifdef CONFIG_CALIBRATE_DELAY
	calibrate_delay();	/* compute this CPU's loops_per_jiffy */
#endif

	/* save our processor parameters */
	smp_store_cpu_info(cpu);

	/* let the boot CPU know we've arrived */
	cpumask_set_cpu(cpu, &cpu_callin_map);
}
875
876
877
878
/*
 * smp_online - mark the calling CPU as online and start taking interrupts
 */
static void __init smp_online(void)
{
	int cpu;

	cpu = smp_processor_id();

	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	local_irq_enable();
}

/*
 * smp_cpus_done - called once all requested CPUs have been brought up
 * @max_cpus: maximum number of CPUs requested (unused)
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * smp_prepare_boot_cpu - set up bookkeeping for the boot CPU
 *
 * The boot CPU is trivially called out and checked in; give it logical
 * CPU number 0.
 */
void smp_prepare_boot_cpu(void)
{
	cpumask_set_cpu(0, &cpu_callout_map);
	cpumask_set_cpu(0, &cpu_callin_map);
	current_thread_info()->cpu = 0;
}
914
915
916
917
918
919
/*
 * initialize_secondary - final springboard onto a secondary CPU's idle task
 *
 * Switches onto the idle thread's stack and jumps to its saved program
 * counter (start_secondary, as set in do_boot_cpu).  Does not return.
 */
void initialize_secondary(void)
{
	asm volatile (
		"mov	%0,sp	\n"
		"jmp	(%1)	\n"
		:
		: "a"(current->thread.sp), "a"(current->thread.pc));
}
928
929
930
931
932
/*
 * __cpu_up - bring a secondary CPU online
 * @cpu: the CPU to release
 * @tidle: idle task for the CPU (unused here; this arch forks its own
 *	idle thread in do_boot_cpu)
 *
 * Wakes the CPU if it was hot-unplugged into sleep mode, releases it via
 * smp_commenced_mask and waits up to ~5s for it to appear online.
 */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int timeout;

#ifdef CONFIG_HOTPLUG_CPU
	if (num_online_cpus() == 1)
		disable_hlt();	/* the boot CPU must keep servicing IPIs */
	if (sleep_mode[cpu])
		run_wakeup_cpu(cpu);
#endif /* CONFIG_HOTPLUG_CPU */

	cpumask_set_cpu(cpu, &smp_commenced_mask);

	/* wait up to 5s for it to come online */
	for (timeout = 0 ; timeout < 5000 ; timeout++) {
		if (cpu_online(cpu))
			break;
		udelay(1000);
	}

	BUG_ON(!cpu_online(cpu));
	return 0;
}
956
957
958
959
960
961
962
963
/*
 * setup_profiling_timer - per-CPU profiling rate adjustment (unsupported)
 * @multiplier: requested profiling rate multiplier
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
968
969
970
971
972#ifdef CONFIG_HOTPLUG_CPU
973
974static DEFINE_PER_CPU(struct cpu, cpu_devices);
975
976static int __init topology_init(void)
977{
978 int cpu, ret;
979
980 for_each_cpu(cpu) {
981 ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
982 if (ret)
983 printk(KERN_WARNING
984 "topology_init: register_cpu %d failed (%d)\n",
985 cpu, ret);
986 }
987 return 0;
988}
989
990subsys_initcall(topology_init);
991
992int __cpu_disable(void)
993{
994 int cpu = smp_processor_id();
995 if (cpu == 0)
996 return -EBUSY;
997
998 migrate_irqs();
999 cpumask_clear_cpu(cpu, &mm_cpumask(current->active_mm));
1000 return 0;
1001}
1002
/*
 * __cpu_die - park a hot-unplugged CPU
 * @cpu: the CPU being taken down
 *
 * Puts the CPU into its low-power sleep loop; once only one CPU remains
 * online, halting in idle is safe again.
 */
void __cpu_die(unsigned int cpu)
{
	run_sleep_cpu(cpu);

	if (num_online_cpus() == 1)
		enable_hlt();
}
1010
1011#ifdef CONFIG_MN10300_CACHE_ENABLED
1012static inline void hotplug_cpu_disable_cache(void)
1013{
1014 int tmp;
1015 asm volatile(
1016 " movhu (%1),%0 \n"
1017 " and %2,%0 \n"
1018 " movhu %0,(%1) \n"
1019 "1: movhu (%1),%0 \n"
1020 " btst %3,%0 \n"
1021 " bne 1b \n"
1022 : "=&r"(tmp)
1023 : "a"(&CHCTR),
1024 "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
1025 "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
1026 : "memory", "cc");
1027}
1028
1029static inline void hotplug_cpu_enable_cache(void)
1030{
1031 int tmp;
1032 asm volatile(
1033 "movhu (%1),%0 \n"
1034 "or %2,%0 \n"
1035 "movhu %0,(%1) \n"
1036 : "=&r"(tmp)
1037 : "a"(&CHCTR),
1038 "i"(CHCTR_ICEN | CHCTR_DCEN)
1039 : "memory", "cc");
1040}
1041
1042static inline void hotplug_cpu_invalidate_cache(void)
1043{
1044 int tmp;
1045 asm volatile (
1046 "movhu (%1),%0 \n"
1047 "or %2,%0 \n"
1048 "movhu %0,(%1) \n"
1049 : "=&r"(tmp)
1050 : "a"(&CHCTR),
1051 "i"(CHCTR_ICINV | CHCTR_DCINV)
1052 : "cc");
1053}
1054
1055#else
1056#define hotplug_cpu_disable_cache() do {} while (0)
1057#define hotplug_cpu_enable_cache() do {} while (0)
1058#define hotplug_cpu_invalidate_cache() do {} while (0)
1059#endif
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
1073 smp_call_func_t func, void *info,
1074 int wait)
1075{
1076
1077
1078
1079
1080 static struct nmi_call_data_struct nmi_call_func_mask_data
1081 __cacheline_aligned;
1082 unsigned long start, end;
1083
1084 start = (unsigned long)&nmi_call_func_mask_data;
1085 end = start + sizeof(struct nmi_call_data_struct);
1086
1087 nmi_call_func_mask_data.func = func;
1088 nmi_call_func_mask_data.info = info;
1089 nmi_call_func_mask_data.started = cpumask;
1090 nmi_call_func_mask_data.wait = wait;
1091 if (wait)
1092 nmi_call_func_mask_data.finished = cpumask;
1093
1094 spin_lock(&smp_nmi_call_lock);
1095 nmi_call_data = &nmi_call_func_mask_data;
1096 mn10300_local_dcache_flush_range(start, end);
1097 smp_wmb();
1098
1099 send_IPI_mask(cpumask, CALL_FUNCTION_NMI_IPI);
1100
1101 do {
1102 mn10300_local_dcache_inv_range(start, end);
1103 barrier();
1104 } while (!cpumask_empty(&nmi_call_func_mask_data.started));
1105
1106 if (wait) {
1107 do {
1108 mn10300_local_dcache_inv_range(start, end);
1109 barrier();
1110 } while (!cpumask_empty(&nmi_call_func_mask_data.finished));
1111 }
1112
1113 spin_unlock(&smp_nmi_call_lock);
1114 return 0;
1115}
1116
/*
 * restart_wakeup_cpu - re-join the system after waking from hot-unplug sleep
 *
 * Runs on the woken CPU: re-registers in the callin map, flushes the TLB
 * and marks itself online again.
 */
static void restart_wakeup_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	cpumask_set_cpu(cpu, &cpu_callin_map);
	local_flush_tlb();
	set_cpu_online(cpu, true);
	smp_wmb();
}

/*
 * prepare_sleep_cpu - NMI callback run on a CPU about to be put to sleep
 * @unused: unused
 *
 * Flags the CPU as sleeping, then flushes and disables its caches so it
 * can run uncached while parked.
 */
static void prepare_sleep_cpu(void *unused)
{
	sleep_mode[smp_processor_id()] = 1;
	smp_mb();
	mn10300_local_dcache_flush_inv();
	hotplug_cpu_disable_cache();
	hotplug_cpu_invalidate_cache();
}

/*
 * sleep_cpu - NMI callback that actually parks the CPU
 * @unused: unused
 *
 * Sleeps until sleep_mode[] is cleared by run_wakeup_cpu(); spurious
 * wakeups simply loop back to sleep.  On a genuine wakeup, rejoins the
 * system via restart_wakeup_cpu().
 */
static void sleep_cpu(void *unused)
{
	unsigned int cpu_id = smp_processor_id();

	do {
		smp_mb();
		__sleep_cpu();
	} while (sleep_mode[cpu_id]);
	restart_wakeup_cpu();
}
1150
1151static void run_sleep_cpu(unsigned int cpu)
1152{
1153 unsigned long flags;
1154 cpumask_t cpumask;
1155
1156 cpumask_copy(&cpumask, &cpumask_of(cpu));
1157 flags = arch_local_cli_save();
1158 hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
1159 hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
1160 udelay(1);
1161 arch_local_irq_restore(flags);
1162}
1163
/*
 * wakeup_cpu - NMI callback run on a CPU being woken from hot-unplug sleep
 *
 * Re-enables the caches and clears the sleep flag, which lets sleep_cpu()
 * fall out of its loop and rejoin the system.
 */
static void wakeup_cpu(void)
{
	hotplug_cpu_invalidate_cache();
	hotplug_cpu_enable_cache();
	smp_mb();
	sleep_mode[smp_processor_id()] = 0;
}
1171
1172static void run_wakeup_cpu(unsigned int cpu)
1173{
1174 unsigned long flags;
1175
1176 flags = arch_local_cli_save();
1177#if NR_CPUS == 2
1178 mn10300_local_dcache_flush_inv();
1179#else
1180
1181
1182
1183
1184#error not support NR_CPUS > 2, when CONFIG_HOTPLUG_CPU=y.
1185#endif
1186 hotplug_cpu_nmi_call_function(cpumask_of(cpu), wakeup_cpu, NULL, 1);
1187 arch_local_irq_restore(flags);
1188}
1189
1190#endif
1191