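/*
 * MIPS SMP support.
 */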
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cpc.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

cpumask_t cpu_callin_map;		/* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of threads (siblings) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

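/*
 * Record which CPUs are hardware siblings (same package and core) of
 * the given CPU in cpu_sibling_map[].
 */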
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpu_data[cpu].package == cpu_data[i].package &&
			    cpu_data[cpu].core == cpu_data[i].core) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

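/*
 * Record which CPUs share a physical package with the given CPU in
 * cpu_core_map[].
 */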
static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask (one VPE per online core)
 * whenever a CPU comes online or goes offline.
 */
static inline void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpu_data[i].package == cpu_data[k].package &&
			    cpu_data[i].core == cpu_data[k].core)
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
}

struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

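/*
 * Generic IPI support: IPIs are sent through per-CPU IPI IRQs reserved
 * from an IPI-capable irqdomain.
 */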
#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;

	default:
		BUG();
	}

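	/*
	 * If a target CPU is not yet coherent (e.g. its core is still
	 * powered down), ask the CPC to power the core up so that the
	 * IPI can be taken.
	 */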
	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			core = cpu_data[cpu].core;

			if (core == current_cpu_data.core)
				continue;

			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}

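/* IPI handlers: reschedule and remote function call. */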
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler = ipi_resched_interrupt,
	.flags = IRQF_PERCPU,
	.name = "IPI resched"
};

static struct irqaction irq_call = {
	.handler = ipi_call_interrupt,
	.flags = IRQF_PERCPU,
	.name = "IPI call"
};

static __init void smp_ipi_init_one(unsigned int virq,
				    struct irqaction *action)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = setup_irq(virq, action);
	BUG_ON(ret);
}

static int __init mips_smp_ipi_init(void)
{
	unsigned int call_virq, sched_virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	/*
	 * A single-CPU system doesn't need IPIs at all. Skipping the
	 * initialisation here also avoids triggering the BUG_ON(!ipidomain)
	 * below on platforms that provide no IPI-capable irqdomain.
	 */
	if (cpumask_weight(cpu_possible_mask) == 1)
		return 0;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have only a partial DT setup: if we found an irq
	 * parent node but no IPI domain attached to it, fall back to
	 * searching all irqdomains for one that can provide IPIs.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	call_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
	BUG_ON(!call_virq);

	sched_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
	BUG_ON(!sched_virq);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, cpu_possible_mask) {
			smp_ipi_init_one(call_virq + cpu, &irq_call);
			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
		}
	} else {
		smp_ipi_init_one(call_virq, &irq_call);
		smp_ipi_init_one(sched_virq, &irq_resched);
	}

	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();
	cpu_report();
	maar_init();

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	calculate_cpu_foreign_map();

	cpumask_set_cpu(cpu, &cpu_callin_map);

	synchronise_count_slave(cpu);

	/*
	 * Interrupts will be enabled by the platform's ->smp_finish();
	 * enabling them any earlier could cause problems.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Set cpu_foreign_map for every online CPU so that we don't miss
	 * any IPI while this CPU is being taken down.
	 */
	cpumask_copy(&cpu_foreign_map, cpu_online_mask);

	/* Make the change visible to every other CPU */
	smp_mb();

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	cpumask_set_cpu(0, &cpu_callin_map);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	mp_ops->boot_secondary(cpu, tidle);

	/*
	 * Wait for the secondary CPU to call in. There is no timeout here,
	 * so a CPU that never shows up will hang the boot.
	 */
	while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
		udelay(100);
		schedule();
	}

	synchronise_count_master(cpu);
	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

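/*
 * Flush the entire TLB on all CPUs: each CPU runs local_flush_tlb_all()
 * either locally or from IPI context.
 */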
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special variants of smp_call_function() for use by the TLB flush
 * functions: run @func on every other CPU (smp_on_other_tlbs), or on
 * every CPU including the current one (smp_on_each_tlb).
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following TLB flush calls are invoked when old translations are
 * being torn down or PTE attributes are changing. For an address space
 * that is only active on the current CPU, it is enough to invalidate the
 * MMU context on the other CPUs so that a fresh ASID is allocated at the
 * next switch_mm(); if the mm may be active elsewhere (multithreaded
 * processes, or flushes done on behalf of another task), an IPI has to be
 * sent so the remote CPUs flush immediately.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

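/* Argument block passed to the TLB flush IPI handlers below. */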
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

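/* Kernel mappings have no mm/ASID, so always flush on every CPU. */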
static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
		}
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

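/* Flush a single kernel virtual address mapping on every CPU. */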
static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#if defined(CONFIG_KEXEC)
void (*dump_ipi_function_ptr)(void *) = NULL;
void dump_send_ipi(void (*dump_ipi_callback)(void *))
{
	int i;
	int cpu = smp_processor_id();

	dump_ipi_function_ptr = dump_ipi_callback;
	smp_mb();
	for_each_online_cpu(i)
		if (i != cpu)
			mp_ops->send_ipi_single(i, SMP_DUMP);
}
EXPORT_SYMBOL(dump_send_ipi);
#endif

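/*
 * Tick broadcast support: kick target CPUs with an async smp call so
 * they receive the broadcast tick. The per-CPU count avoids queueing
 * the same call_single_data twice.
 */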
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);

void tick_broadcast(const struct cpumask *mask)
{
	atomic_t *count;
	struct call_single_data *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		count = &per_cpu(tick_broadcast_count, cpu);
		csd = &per_cpu(tick_broadcast_csd, cpu);

		if (atomic_inc_return(count) == 1)
			smp_call_function_single_async(cpu, csd);
	}
}

static void tick_broadcast_callee(void *info)
{
	int cpu = smp_processor_id();

	tick_receive_broadcast();
	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

static int __init tick_broadcast_init(void)
{
	struct call_single_data *csd;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		csd->func = tick_broadcast_callee;
	}

	return 0;
}
early_initcall(tick_broadcast_init);

#endif