/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched.h>	/* tasklist_lock, for_each_process(), idle_task_exit() */
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

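/*
 * Install the platform's SMP operations (CPU bring-up, CPU disable and
 * IPI delivery).  Called once by the platform code during early setup.
 */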
void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

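/*
 * Copy the boot CPU's cpuinfo into this CPU's slot and record the
 * loops_per_jiffy value calibrated on this CPU.
 */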
static inline void __cpuinit smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

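/*
 * Called by the generic SMP code on the boot CPU before any secondary
 * CPUs are started.  Hands off to the platform's prepare_cpus() hook.
 */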
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(&cpu_possible_map);
#endif
}

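/*
 * Mark the boot CPU online and possible and record its logical mapping.
 */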
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

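/*
 * CPU hotplug support: native_cpu_die() polls cpu_state until the dying
 * CPU has marked itself CPU_DEAD, giving up after roughly one second.
 */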
#ifdef CONFIG_HOTPLUG_CPU
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;
}

void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	__get_cpu_var(cpu_state) = CPU_DEAD;
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
}

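/*
 * Take the calling CPU down: let the platform veto it first, then mark
 * it offline, migrate its IRQs away, stop its local timer, flush caches
 * and TLBs, and drop it from every process's mm_cpumask.
 */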
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p)
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	read_unlock(&tasklist_lock);

	return 0;
}
#else /* !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif

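/*
 * C entry point for a secondary CPU, reached via stack_start.start_kernel_fn
 * as set up by __cpu_up(): grab a reference on init_mm, initialise traps and
 * the local timer, calibrate the delay loop, mark the CPU online and drop
 * into the idle loop.
 */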
asmlinkage void __cpuinit start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	/* Enable local timers */
	local_timer_setup(cpu);
	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_idle();
}

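/*
 * Parameter block consumed by the secondary CPU boot code in head.S:
 * __cpu_up() fills in the idle thread's stack pointer, thread_info and
 * the C entry point before asking the platform to start the CPU.
 */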
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

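/*
 * Bring up one secondary CPU: fork (or reuse) its idle task, point
 * stack_start at it, ask the platform to start the CPU, then wait up to
 * one second for it to appear in the online mask.
 */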
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *tsk;
	unsigned long timeout;

	tsk = cpu_data[cpu].idle;
	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk)) {
			pr_err("Failed forking idle task for cpu %d\n", cpu);
			return PTR_ERR(tsk);
		}

		cpu_data[cpu].idle = tsk;
	}

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
		barrier();
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

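/*
 * Print a summary of the online CPUs and their combined BogoMIPS once
 * all secondaries have been brought up.
 */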
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

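/*
 * IPI senders: each helper routes the request to the platform's
 * send_ipi() hook with the matching SMP_MSG_* type, which is decoded on
 * the receiving CPU by smp_message_recv().
 */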
void smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

void smp_timer_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	local_timer_interrupt();
	irq_exit();
}

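/*
 * Demultiplex an incoming IPI message.  The SMP_MSG_RESCHEDULE case is
 * intentionally empty: taking the interrupt is enough, rescheduling
 * happens on the return path.
 */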
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

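/*
 * flush_tlb_all() runs local_flush_tlb_all() on every online CPU via
 * on_each_cpu(); the *_ipi() wrappers below exist because the cross-call
 * interface passes a single void pointer argument.
 */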
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kick them.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

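/*
 * Kernel mappings are shared by all CPUs and are not tied to an mm, so
 * the kernel-range flush is unconditionally broadcast to every CPU.
 */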
static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

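/*
 * Flush a single (ASID, address) translation on every CPU.  There is no
 * mm to inspect here, so the IPI is always broadcast before flushing
 * locally.
 */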
static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}