/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/r4k-timer.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/time.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TIs sibling map */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

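/*
 * Record which CPUs share a physical core with @cpu, in both
 * directions, so the scheduler can see the sibling relationship.
 */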
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (cpu_data[cpu].core == cpu_data[i].core) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
			}
		}
	} else
		cpu_set(cpu, cpu_sibling_map[cpu]);
}

struct plat_smp_ops *mp_ops;

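/*
 * Install the platform's SMP operations.  As a purely hypothetical
 * illustration (these names are examples, not part of this file),
 * platform setup code would do something like:
 *
 *	static struct plat_smp_ops example_smp_ops = {
 *		.send_ipi_single	= example_send_ipi_single,
 *		.send_ipi_mask		= example_send_ipi_mask,
 *		.init_secondary		= example_init_secondary,
 *		.smp_finish		= example_smp_finish,
 *		.cpus_done		= example_cpus_done,
 *		.boot_secondary		= example_boot_secondary,
 *		.smp_setup		= example_smp_setup,
 *		.prepare_cpus		= example_prepare_cpus,
 *	};
 *
 *	register_smp_ops(&example_smp_ops);
 */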
__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}
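/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */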
asmlinkage __cpuinit void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	mips_clockevent_init();
	mp_ops->init_secondary();

	/*
	 * Calibrate the delay loop before this CPU is marked online, so
	 * udelay() is usable by the time anyone can IPI us.
	 */
	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	notify_cpu_starting(cpu);

	mp_ops->smp_finish();
	set_cpu_sibling_map(cpu);

	cpu_set(cpu, cpu_callin_map);

	synchronise_count_slave();

	cpu_idle();
}
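/*
 * Call into both interrupt handlers, as we share the IPI for them.
 * A platform's IPI dispatcher typically ends up here; hypothetically:
 *
 *	if (action & SMP_CALL_FUNCTION)
 *		smp_call_function_interrupt();
 */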
void smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	generic_smp_call_function_interrupt();
	irq_exit();
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU from the online map, then spin in the
	 * lowest-power wait state available.
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	for (;;) {
		if (cpu_wait)
			(*cpu_wait)();
	}
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	mp_ops->cpus_done();
	synchronise_count_master();
}
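/* called from main before smp_init() */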
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(&cpu_possible_map);
#endif
}
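/* preload SMP state for boot cpu */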
void __devinit smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	cpu_set(0, cpu_callin_map);
}
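/*
 * Idle threads cannot be freed once their CPU has gone offline, so
 * cache them here and reuse them if the CPU is brought up again.
 */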
static struct task_struct *cpu_idle_thread[NR_CPUS];

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * The secondary runs start_secondary() and announces itself in
	 * cpu_callin_map; the idle thread exists purely so that Linux
	 * can schedule on the new CPU.
	 */
	if (!cpu_idle_thread[cpu]) {
		idle = fork_idle(cpu);
		cpu_idle_thread[cpu] = idle;

		if (IS_ERR(idle))
			panic("Fork failed for CPU %d", cpu);
	} else {
		idle = cpu_idle_thread[cpu];
		init_idle(idle, cpu);
	}

	mp_ops->boot_secondary(cpu, idle);

	/*
	 * Wait for the secondary to check in; there is no timeout here,
	 * so a CPU that never comes up hangs the boot.
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}
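/* Not really SMP stuff: accept (and ignore) profiling timer changes. */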
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}
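/*
 * Special variant of smp_call_function for use by the TLB functions:
 *
 *  o No return value
 *  o collapses to a normal function call on UP kernels
 *  o collapses to a normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core,
 *    so no cross-CPU call is needed there.
 */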
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1);
#endif
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}
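/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * We use tlb flush ipis, and tlb flush ipis are handled at the level of
 * the smp call function infrastructure.
 */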
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		/*
		 * No other CPU can be running this mm: just invalidate
		 * their ASIDs so switch_mm allocates a fresh context.
		 */
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);