/*
 *	x86-64 SMP support routines: TLB shootdown IPIs, reschedule IPIs
 *	and cross-CPU function calls.
 */
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>

#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mach_apic.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
#include <asm/idle.h>

/*
 *	Smarter SMP flushing.
 *
 *	To avoid global state, NUM_INVALIDATE_TLB_VECTORS different
 *	interrupt vectors are used for TLB shootdowns.  Each sending CPU
 *	hashes to one vector and to one flush_state slot below, so flushes
 *	initiated by different CPUs can proceed in parallel instead of
 *	serializing on a single global lock.  The receiving CPU recovers
 *	the sender's slot from the vector number it was interrupted with.
 */

union smp_flush_state {
	struct {
		cpumask_t flush_cpumask;
		struct mm_struct *flush_mm;
		unsigned long flush_va;
#define FLUSH_ALL	-1ULL
		spinlock_t tlbstate_lock;
	};
	char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned;

/*
 * State is put into the per-CPU data section, but padded to a full
 * cache line because other CPUs write to it and we don't want false
 * sharing in the per-CPU data segment.
 */
static DEFINE_PER_CPU(union smp_flush_state, flush_state);

/*
 * We cannot call mmdrop() because we are in interrupt context; instead
 * we just drop this CPU from the mm's cpu_vm_mask and switch to the
 * kernel page tables.  Must only be called while in lazy TLB mode
 * (mmu_state != TLBSTATE_OK).
 */
static inline void leave_mm(int cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}

/*
 * TLB flush IPI protocol:
 *
 * The sender (flush_tlb_others) publishes the mm and virtual address in
 * its per-sender flush_state slot, sets the target CPUs in
 * flush_cpumask, sends the IPI on its own vector, and spins until every
 * target has cleared itself from flush_cpumask again.
 *
 * On the receiving side each CPU is in one of two states for a given mm:
 *
 *  - TLBSTATE_OK:   the CPU is actively using the mm; flush IPIs for it
 *                   must be honoured.
 *  - TLBSTATE_LAZY: the CPU kept the mm's page tables loaded after
 *                   switching to a kernel thread.  On the first flush
 *                   IPI it calls leave_mm() to drop the mm (clearing its
 *                   bit in cpu_vm_mask and loading swapper_pg_dir), so
 *                   it receives no further flush IPIs for that mm.
 *
 * A CPU may also find itself in the flush cpumask for an mm it has
 * already switched away from, because the sender samples cpu_vm_mask
 * without synchronizing against context switches.  That race is benign:
 * the receiver compares flush_mm against its own active_mm, and in the
 * worst case performs a superfluous TLB flush.
 *
 * The handler below runs with interrupts disabled and takes no locks,
 * so it cannot deadlock against a sender spinning on flush_cpumask.
 */

asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
	int cpu;
	int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
	f = &per_cpu(flush_state, sender);

	/*
	 * This CPU may no longer be a target of the flush if it switched
	 * mms after the sender sampled cpu_vm_mask; in that case there
	 * is nothing to do but ack the IPI.
	 */
	if (!cpu_isset(cpu, f->flush_cpumask))
		goto out;

	/*
	 * If the mm being flushed is the one this CPU currently has
	 * loaded, flush the requested entry (or the whole TLB for
	 * FLUSH_ALL).  If we are only holding it lazily, drop it
	 * instead, so we stop receiving flush IPIs for it.
	 */
	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	cpu_clear(cpu, f->flush_cpumask);
	add_pda(irq_tlb_count, 1);
}

static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/*
	 * The lock could be avoided when num_online_cpus() <=
	 * NUM_INVALIDATE_TLB_VECTORS, but it is probably not worth
	 * checking for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * Send the IPI only to the CPUs affected, then wait for them
	 * all to clear themselves out of flush_cpumask.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}

int __cpuinit init_smp_flush(void)
{
	int i;

	for_each_cpu_mask(i, cpu_possible_map) {
		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
	}
	return 0;
}

core_initcall(init_smp_flush);

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_current_task);

void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (read_pda(mmu_state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}

/*
 * This function sends a 'reschedule' IPI to another CPU.  It goes
 * straight through and wastes no time serializing anything; the worst
 * case is that we lose a reschedule.
 */
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}

/*
 * Structure and data for smp_call_function().  The sender fills in a
 * call_data_struct on its stack and publishes it through the single
 * call_data pointer, protected by call_lock.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func)(void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct *call_data;

void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}

/*
 * Send a 'generic call function' IPI to all other CPUs in the mask and,
 * optionally, wait for them to finish.  The caller must hold call_lock.
 */
static int
__smp_call_function_mask(cpumask_t mask,
			 void (*func)(void *), void *info,
			 int wait)
{
	struct call_data_struct data;
	cpumask_t allbutself;
	int cpus;

	allbutself = cpu_online_map;
	cpu_clear(smp_processor_id(), allbutself);

	cpus_and(mask, mask, allbutself);
	cpus = cpus_weight(mask);

	if (!cpus)
		return 0;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();

	/* Send a message to the other CPUs */
	if (cpus_equal(mask, allbutself))
		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (!wait)
		return 0;

	while (atomic_read(&data.finished) != cpus)
		cpu_relax();

	return 0;
}

/**
 * smp_call_function_mask - Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  Must not include the current cpu.
 * @func: The function to run.  This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on
 *        the other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise it
 * returns just before the target cpus call @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function_mask(cpumask_t mask,
			   void (*func)(void *), void *info,
			   int wait)
{
	int ret;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	spin_lock(&call_lock);
	ret = __smp_call_function_mask(mask, func, info, wait);
	spin_unlock(&call_lock);
	return ret;
}
EXPORT_SYMBOL(smp_call_function_mask);
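
/*
 * Usage sketch (illustrative only, not part of this file): run a fast,
 * non-blocking helper on a set of other CPUs and wait for it to finish
 * everywhere.  "poke" and "some_set_of_cpus" are hypothetical; the
 * helper runs in interrupt context on each target, so it must not
 * sleep.  The mask must not include the calling CPU:
 *
 *	cpumask_t mask = some_set_of_cpus;
 *	cpu_clear(smp_processor_id(), mask);
 *	smp_call_function_mask(mask, poke, NULL, 1);
 */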

/**
 * smp_call_function_single - Run a function on a specific CPU.
 * @cpu: The CPU to run @func on.
 * @func: The function to run.  This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Currently unused.
 * @wait: If true, wait until function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute @func
 * or has executed it.
 */
int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
			     int nonatomic, int wait)
{
	/* Prevent preemption and rescheduling on another processor */
	int ret;
	int me = get_cpu();

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	if (cpu == me) {
		local_irq_disable();
		func(info);
		local_irq_enable();
		put_cpu();
		return 0;
	}

	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);

	put_cpu();
	return ret;
}
EXPORT_SYMBOL(smp_call_function_single);
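
/*
 * Usage sketch (illustrative only): fetch a value from one specific
 * CPU and wait for the result.  "read_remote" is a hypothetical
 * helper; it runs in interrupt context on the target CPU, so it must
 * be fast and must not sleep:
 *
 *	static void read_remote(void *info)
 *	{
 *		*(unsigned long *)info = read_pda(irq_call_count);
 *	}
 *
 *	unsigned long val;
 *	smp_call_function_single(cpu, read_remote, &val, 0, 1);
 */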

/**
 * smp_call_function - Run a function on all other CPUs.
 * @func: The function to run.  This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Currently unused.
 * @wait: If true, wait (atomically) until function has completed on
 *        the other CPUs.
 *
 * Returns 0 on success, else a negative status code.  Does not return
 * until remote CPUs are nearly ready to execute @func or have executed
 * it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info, int nonatomic,
		      int wait)
{
	return smp_call_function_mask(cpu_online_map, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function);
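
/*
 * Lifetime note (illustrative sketch): even with wait == 0 the call
 * above does not return until every target CPU has picked up func and
 * info, but the functions may still be running.  If func dereferences
 * info, pass wait == 1 before letting info go out of scope.  "bump"
 * and "hits" are hypothetical:
 *
 *	static void bump(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	atomic_t hits = ATOMIC_INIT(0);
 *	smp_call_function(bump, &hits, 0, 1);
 */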

static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU from the online map:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	disable_local_APIC();
	for (;;)
		halt();
}

void smp_send_stop(void)
{
	int nolock;
	unsigned long flags;

	if (reboot_force)
		return;

	/* Don't deadlock on the call lock in panic */
	nolock = !spin_trylock(&call_lock);
	local_irq_save(flags);
	__smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
	if (!nolock)
		spin_unlock(&call_lock);
	disable_local_APIC();
	local_irq_restore(flags);
}

/*
 * Reschedule call back.  Nothing to do: all the work is done
 * automatically when we return from the interrupt.
 */
asmlinkage void smp_reschedule_interrupt(void)
{
	ack_APIC_irq();
	add_pda(irq_resched_count, 1);
}

asmlinkage void smp_call_function_interrupt(void)
{
	void (*func)(void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify the initiating CPU that we've grabbed the data and are
	 * about to execute the function.
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the call_data structure may go out of scope on
	 * the initiating CPU unless wait == 1.
	 */
	exit_idle();
	irq_enter();
	(*func)(info);
	add_pda(irq_call_count, 1);
	irq_exit();
	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}