/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * structures for inter-processor calls
 * - a collection of single bit ipi messages.
 */
struct ipi_data {
	spinlock_t lock;
	unsigned long ipi_count;
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
	/*
	 * SPIN_LOCK_UNLOCKED is deprecated and breaks lockdep;
	 * use the per-instance initialiser instead.
	 */
	.lock	= __SPIN_LOCK_UNLOCKED(ipi_data.lock),
};

enum ipi_msg_type {
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};
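
/*
 * A minimal sketch of the IPI round trip implemented below (the helper
 * names are the real ones from this file; the scenario is illustrative):
 *
 *	smp_send_reschedule(cpu);
 *	  -> send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 *	       sets bit IPI_RESCHEDULE in per_cpu(ipi_data, cpu).bits and
 *	       raises the hardware interrupt via smp_cross_call()
 *	... the target CPU takes the interrupt ...
 *	do_IPI(regs);
 *	  reads and clears ipi_data.bits, then dispatches each pending
 *	  message through its switch statement
 */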

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	pmd_t *pmd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	}

	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set of our
	 * "standard" page tables, with the addition of a 1:1 section
	 * mapping for the physical address the kernel is running at.
	 */
	pgd = pgd_alloc(&init_mm);
	pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET);
	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
	flush_pmd_entry(pmd);

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);
	wmb();

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu))
			ret = -EIO;
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	*pmd = __pmd(0);
	clean_pmd_entry(pmd);
	pgd_free(&init_mm, pgd);

	if (ret) {
		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

		/*
		 * FIXME: We need to clean up the new idle thread. --rmk
		 */
	}

	return ret;
}
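
/*
 * For context (callers live outside this file): __cpu_up() is reached
 * through the generic cpu_up() path, e.g. during boot-time smp_init()
 * or when userspace writes 1 to /sys/devices/system/cpu/cpuN/online.
 */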

#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mach_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	}
	read_unlock(&tasklist_lock);

	return 0;
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back into the secondary
	 * boot path, resetting the stack pointer to the top of this
	 * CPU's idle thread stack first, so the CPU re-initialises
	 * itself if/when it is brought back online.
	 */
	__asm__("mov	sp, %0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	cpu_init();
	preempt_disable();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	/*
	 * Enable local interrupts.
	 */
	notify_cpu_starting(cpu);
	local_irq_enable();
	local_fiq_enable();

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	set_cpu_online(cpu, true);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}
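
/*
 * Worked example of the BogoMIPS arithmetic above: with HZ=100 and a
 * single CPU whose loops_per_jiffy is 498688, the integer part is
 * 498688 / (500000/100) = 498688 / 5000 = 99 and the fractional part
 * is (498688 / 50) % 100 = 73, so the line reads "99.73 BogoMIPS".
 */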

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;
}

static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);

	for_each_cpu(cpu, mask) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);
		ipi->bits |= 1 << msg;
		spin_unlock(&ipi->lock);
	}

	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(mask);

	local_irq_restore(flags);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void show_ipi_list(struct seq_file *p)
{
	unsigned int cpu;

	seq_puts(p, "IPI:");

	for_each_present_cpu(cpu)
		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

	seq_putc(p, '\n');
}

void show_local_irqs(struct seq_file *p)
{
	unsigned int cpu;

	seq_printf(p, "LOC: ");

	for_each_present_cpu(cpu)
		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

	seq_putc(p, '\n');
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	irq_enter();
	evt->event_handler(evt);
	irq_exit();
}
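
/*
 * Illustrative flow (all names are from this file): on systems with
 * working per-CPU timers, do_local_timer() below acks the local timer
 * interrupt and calls ipi_timer(); without them, the broadcast path
 * sends IPI_TIMER, which do_IPI() routes to the same ipi_timer()
 * handler, so both paths feed the registered clock event handler.
 */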

#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception do_local_timer(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();

	if (local_timer_ack()) {
		irq_stat[cpu].local_timer_irqs++;
		ipi_timer();
	}

	set_irq_regs(old_regs);
}
#endif /* CONFIG_LOCAL_TIMERS */

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_TIMER);
}

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
	/* the dummy broadcast device needs no mode changes */
}

/*
 * Register a dummy, broadcast-driven clock event device for this CPU.
 */
static void local_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;
	evt->broadcast	= smp_timer_broadcast;

	clockevents_register_device(evt);
}
#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */

void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);

	local_timer_setup(evt);
}

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts.
 *
 * Each CPU accumulates pending messages as single bits in its
 * ipi_data.bits word; we drain that word here and dispatch every
 * message that was set, repeating until no new bits arrive.
 */
asmlinkage void __exception do_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	struct pt_regs *old_regs = set_irq_regs(regs);

	ipi->ipi_count++;

	for (;;) {
		unsigned long msgs;

		spin_lock(&ipi->lock);
		msgs = ipi->bits;
		ipi->bits = 0;
		spin_unlock(&ipi->lock);

		if (!msgs)
			break;

		do {
			unsigned nextmsg;

			/*
			 * Isolate the lowest set bit, clear it from
			 * msgs, and convert it to a message number.
			 */
			nextmsg = msgs & -msgs;
			msgs &= ~nextmsg;
			nextmsg = ffz(~nextmsg);

			switch (nextmsg) {
			case IPI_TIMER:
				ipi_timer();
				break;

			case IPI_RESCHEDULE:
				/*
				 * nothing more to do - everything is
				 * done on the interrupt return path
				 */
				break;

			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;

			default:
				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
				       cpu, nextmsg);
				break;
			}
		} while (msgs);
	}

	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	send_ipi_message(&mask, IPI_CPU_STOP);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static void
on_each_cpu_mask(void (*func)(void *), void *info, int wait,
	const struct cpumask *mask)
{
	preempt_disable();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(smp_processor_id(), mask))
		func(info);

	preempt_enable();
}
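
/*
 * A note on the helper above: smp_call_function_many() does not call
 * func() on the calling CPU, so when the current CPU is in the mask we
 * must invoke func() locally ourselves, with preemption disabled so
 * the smp_processor_id() test stays valid for the duration of the call.
 */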

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
	else
		local_flush_tlb_all();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
	else
		local_flush_tlb_mm(mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = uaddr;
		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_page(vma, uaddr);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = kaddr;
		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
	} else
		local_flush_tlb_kernel_page(kaddr);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_range(vma, start, end);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
	} else
		local_flush_tlb_kernel_range(start, end);
}
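
/*
 * Usage sketch (illustrative, not part of this file): after changing a
 * kernel page table entry, a caller invalidates the mapping on all
 * CPUs with
 *
 *	flush_tlb_kernel_page(kaddr);
 *
 * On cores whose TLB maintenance operations are not broadcast in
 * hardware, tlb_ops_need_broadcast() is true and the flush is pushed
 * to every CPU by IPI; otherwise the local operation alone suffices.
 */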