/* smp_64.c: Sparc64 SMP support. */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/kgdb.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>
#include <asm/setup.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>
#include <asm/pcr.h>

#include "cpumap.h"
#include "kernel.h"

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
EXPORT_SYMBOL(cpu_core_sib_map);

static cpumask_t smp_commenced_mask;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush %%g6" : : : "memory");

	/* Clear the new_child flag for this freshly started idle thread. */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Inform the notifiers about the new cpu. */
	notify_cpu_starting(cpuid);

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();

	set_cpu_online(cpuid, true);

	/* The idle thread is expected to run with preemption disabled. */
	preempt_disable();

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

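/* Tick register synchronization, modeled on the ia64 port's ITC
 * synchronization code: the slave measures the round-trip time of a
 * handshake with the master and adjusts its %tick by the estimated
 * offset.
 */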
#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64
#define NUM_ITERS	5

static DEFINE_RAW_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0
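
/* get_delta() runs on the slave: it performs NUM_ITERS handshakes with
 * the master through the cache-line-separated go[] flags, keeps the
 * iteration with the smallest round-trip time, and returns the signed
 * offset between the midpoint of our best (t0, t1) window and the
 * master's timestamp.  *rt and *master report the best round trip and
 * master delta for diagnostics.
 */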
163
164static inline long get_delta (long *rt, long *master)
165{
166 unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
167 unsigned long tcenter, t0, t1, tm;
168 unsigned long i;
169
170 for (i = 0; i < NUM_ITERS; i++) {
171 t0 = tick_ops->get_tick();
172 go[MASTER] = 1;
173 membar_safe("#StoreLoad");
174 while (!(tm = go[SLAVE]))
175 rmb();
176 go[SLAVE] = 0;
177 wmb();
178 t1 = tick_ops->get_tick();
179
180 if (t1 - t0 < best_t1 - best_t0)
181 best_t0 = t0, best_t1 = t1, best_tm = tm;
182 }
183
184 *rt = best_t1 - best_t0;
185 *master = best_tm - best_t0;
186
187
188 tcenter = (best_t0/2 + best_t1/2);
189 if (best_t0 % 2 + best_t1 % 2 == 2)
190 tcenter++;
191 return tcenter - best_tm;
192}

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* measured delta */
		long lat;	/* smoothed latency adjustment */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0)
				done = 1;

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	raw_spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_safe("#StoreLoad");
		}
	}
	raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
}

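/* When logical domaining is enabled a new cpu cannot be started through
 * the firmware.  Instead we build an hvtramp_descr describing the kernel
 * image mappings and the target cpu's trap block, and hand it, together
 * with the real address of the hv_cpu_startup trampoline, to the
 * sun4v_cpu_start() hypervisor call.
 */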
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
				void **descrp)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}
	*descrp = hdesc;

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte = tte_data;
		tte_vaddr += 0x400000;
		tte_data += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

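/* The new cpu picks up its thread_info pointer indirectly through this
 * variable; the startup "cookie" handed to the firmware is the address
 * of this slot rather than the pointer itself.
 */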
static struct thread_info *cpu_new_thread = NULL;

static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	void *descr = NULL;
	int timeout, ret;

	callin_flag = 0;
	cpu_new_thread = task_thread_info(idle);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread,
					    &descr);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->phandle, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	kfree(descr);

	return ret;
}

static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
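	/* Stuff the mondo data into the outgoing interrupt vector data
	 * registers and then launch it at "target" via the interrupt
	 * dispatch register; the dispatch status register is consulted
	 * below to see whether the delivery completed or was NACK'd.
	 */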
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr %1, %2, %%pstate\n\t"
	"stxa %4, [%0] %3\n\t"
	"stxa %5, [%0+%8] %3\n\t"
	"add %0, %8, %0\n\t"
	"stxa %6, [%0+%8] %3\n\t"
	"membar #Sync\n\t"
	"stxa %%g0, [%7] %3\n\t"
	"membar #Sync\n\t"
	"mov 0x20, %%g1\n\t"
	"ldxa [%%g1] 0x7f, %%g0\n\t"
	"membar #Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	u64 *mondo, data0, data1, data2;
	u16 *cpu_list;
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);
	data0 = mondo[0];
	data1 = mondo[1];
	data2 = mondo[2];
	for (i = 0; i < cnt; i++)
		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}

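/* Cheetah (UltraSPARC-III and later) can dispatch a mondo to multiple
 * targets at once: write the mondo data registers a single time, fire
 * the dispatch register once per target cpu, and then watch the
 * busy/nack pairs in the dispatch status register, retrying any cpus
 * that NACK'd us.
 */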
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* On JBUS parts (Jalapeno/Serrano) the busy/nack fields of the
	 * dispatch status register are indexed by the target's interrupt
	 * ID, otherwise they are indexed by dispatch slot.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     :
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa %%g0, [%0] %1\n\t"
				"membar #Sync\n\t"
				:
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, this_cnt = 0;
					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						this_cnt++;
						if (this_cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Some cpus NACK'd us; delay a little before
			 * retrying the dispatch.
			 */
			udelay(2 * nack_busy_id);

			/* Mark the cpus which did not NACK us as done
			 * (0xffff) so we only retry the NACK'd ones.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}
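/* Multi-cpu list version: the sun4v hypervisor consumes the cpu list
 * and mondo block directly and marks successfully sent entries in the
 * list with 0xffff.
 */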
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int retries, this_cpu, prev_sent, i, saw_cpu_error;
	unsigned long status;
	u16 *cpu_list;

	this_cpu = smp_processor_id();

	cpu_list = __va(tb->cpu_list_pa);

	saw_cpu_error = 0;
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done. */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which ones.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err == HV_CPU_STATE_ERROR) {
					saw_cpu_error = (cpu + 1);
					cpu_list[i] = 0xffff;
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	if (unlikely(saw_cpu_error))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "(including %d) were in error state\n",
	       this_cpu, saw_cpu_error - 1);
	return;

fatal_mondo_timeout:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       " progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}

static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

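	/* The per-cpu mondo block and cpu list are not reentrant, so the
	 * whole dispatch must run with interrupts disabled; otherwise an
	 * xcall sent from interrupt context could corrupt the state we
	 * are building up here.
	 */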
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list. */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}
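/* Send cross call to all processors mentioned in MASK, except self. */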
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      cpumask_of(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

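	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */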
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
extern unsigned long xcall_fetch_glob_pmu_n4;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	preempt_enable();
}

void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

void smp_fetch_global_pmu(void)
{
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
	else
		smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
}
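/* TLB flush cross calls.
 *
 * mm_cpumask(mm) tracks which cpus an address space has run on and is
 * the heuristic used to decide whether a cross call is needed at all:
 * when the mm has a single user we simply narrow the mask to the
 * current cpu and flush locally, otherwise the flush is broadcast to
 * every cpu in the mask and then performed locally as well.
 */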
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	struct tlb_pending_info info;
	int cpu = get_cpu();

	info.ctx = ctx;
	info.nr = nr;
	info.vaddrs = vaddrs;

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
				       &info, 1);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long context = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_cross_call_masked(&xcall_flush_tlb_page,
				      context, vaddr, 0,
				      mm_cpumask(mm));
	__flush_tlb_page(context, vaddr);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}
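/* CPU capture: park every other online cpu in a prom-safe spin loop
 * (see smp_penguin_jailcell below) while the capturing cpu does
 * something that requires the machine to be quiescent.  Define
 * CAPTURE_DEBUG for verbose output.
 */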
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

void smp_capture(void)
{
	int result = atomic_add_return(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}
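/* Captured cpus spin here inside the prom's world (prom_world(1))
 * until smp_release() clears penguins_are_doing_time.
 */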
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

/* Not __init: /proc/profile writes can end up calling this at run time. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void smp_prepare_boot_cpu(void)
{
}

void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver_impl = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver_impl = cheetah_xcall_deliver;
	else
		xcall_deliver_impl = hypervisor_xcall_deliver;
}

void smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpumask_set_cpu(i, &cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpumask_set_cpu(j, &cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		for_each_present_cpu(j) {
			if (cpu_data(i).sock_id == cpu_data(j).sock_id)
				cpumask_set_cpu(j, &cpu_core_sib_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
		}
	}
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = smp_boot_one_cpu(cpu, tidle);

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
		if (!cpu_online(cpu)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpumask_clear_cpu(cpu, &smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr %%pstate, %0\n\t"
		"wrpr %0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu(i, &cpu_core_map[cpu])
		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
	cpumask_clear(&cpu_core_map[cpu]);

	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu. */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	set_cpu_online(cpu, false);

	cpu_map_rebuild();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				set_cpu_present(cpu, false);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
	if (cpu == smp_processor_id()) {
		WARN_ON_ONCE(preemptible());
		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
	} else {
		xcall_deliver((u64) &xcall_receive_signal,
			      0, 0, cpumask_of(cpu));
	}
}

void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	scheduler_ipi();
}

static void stop_this_cpu(void *dummy)
{
	prom_stopself();
}

void smp_send_stop(void)
{
	int cpu;

	if (tlb_type == hypervisor) {
		for_each_online_cpu(cpu) {
			if (cpu == smp_processor_id())
				continue;
#ifdef CONFIG_SUN_LDOMS
			if (ldom_domaining_enabled) {
				unsigned long hv_err;
				hv_err = sun4v_cpu_stop(cpu);
				if (hv_err)
					printk(KERN_ERR "sun4v_cpu_stop() "
					       "failed err=%lu\n", hv_err);
			} else
#endif
				prom_stopcpu_cpuid(cpu);
		}
	} else
		smp_call_function(stop_this_cpu, NULL, 0);
}
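/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 */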
static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
					size_t align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node(NODE_DATA(node),
					   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem(size, align, goal);
#endif
}

static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

static void __init pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd)) {
		pud_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pgd_populate(&init_mm, pgd, new);
	}

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, new);
	}
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance,
					    pcpu_alloc_bootmem,
					    pcpu_free_bootmem);
		if (rc)
			pr_warning("PERCPU: %s allocator failed (%d), "
				   "falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   pcpu_alloc_bootmem,
					   pcpu_free_bootmem,
					   pcpu_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Set up the boot cpu's local per-cpu offset. */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

	of_fill_in_cpu_data();
	if (tlb_type == hypervisor)
		mdesc_fill_in_cpu_data(cpu_all_mask);
}