1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include <linux/interrupt.h>
16#include <linux/spinlock.h>
17#include <linux/init.h>
18#include <linux/jiffies.h>
19#include <linux/cpumask.h>
20#include <linux/err.h>
21#include <linux/kernel.h>
22#include <linux/delay.h>
23#include <linux/sched.h>
24#include <linux/profile.h>
25#include <linux/smp.h>
26#include <asm/tlbflush.h>
27#include <asm/bitops.h>
28#include <asm/processor.h>
29#include <asm/bug.h>
30#include <asm/exceptions.h>
31#include <asm/hardirq.h>
32#include <asm/fpu.h>
33#include <asm/mmu_context.h>
34#include <asm/thread_info.h>
35#include <asm/cpu-regs.h>
36#include <asm/intctl-regs.h>
37
38
39
40
/* Sentinel "virtual address" meaning flush the entire TLB rather than a
 * single page; compared against flush_va in smp_flush_tlb().
 */
#define FLUSH_ALL 0xffffffff

/* CPUs that still need to act on the shootdown currently in flight; each
 * target CPU clears its own bit in smp_flush_tlb() when it has finished.
 */
static cpumask_t flush_cpumask;
/* The mm and virtual address describing the request currently in flight. */
static struct mm_struct *flush_mm;
static unsigned long flush_va;
/* Serialises use of the flush_* variables above (one shootdown at a time). */
static DEFINE_SPINLOCK(tlbstate_lock);

/* Per-CPU TLB state, initially referencing init_mm with state 0. */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
	&init_mm, 0
};

static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va);
static void do_flush_tlb_all(void *info);
55
56
57
58
59
/**
 * smp_flush_tlb - Invalidate this CPU's TLB in response to a shootdown.
 * @unused: Callback context (ignored).
 *
 * Runs on each CPU kicked by flush_tlb_others().  Consumes the request
 * published in flush_mm/flush_va, then clears this CPU's bit in
 * flush_cpumask to signal completion to the initiator, which spins until
 * the mask is empty.
 */
void smp_flush_tlb(void *unused)
{
	unsigned long cpu_id;

	cpu_id = get_cpu();

	if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
		/* This CPU was not a target of the current request; nothing
		 * to do.  NOTE(review): presumably the IPI can reach CPUs
		 * outside the mask - confirm against the smp_call_function()
		 * path before treating this as unreachable.
		 */
		goto out;

	/* FLUSH_ALL requests drop the whole TLB; otherwise only the single
	 * page named by flush_mm/flush_va is invalidated.
	 */
	if (flush_va == FLUSH_ALL)
		local_flush_tlb();
	else
		local_flush_tlb_page(flush_mm, flush_va);

	/* Order the TLB operations above before the completion bit is
	 * cleared; the initiator's spin loop in flush_tlb_others() pairs
	 * with these barriers.
	 */
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu_id, &flush_cpumask);
	smp_mb__after_clear_bit();
out:
	put_cpu();
}
87
88
89
90
91
92
93
/**
 * flush_tlb_others - Ask other CPUs to invalidate their TLBs.
 * @cpumask: CPUs to target; must be non-empty, must not include the calling
 *	     CPU, and must be a subset of the online CPUs (all BUG_ON'd).
 * @mm: The mm whose mappings are affected; must not be NULL.
 * @va: Virtual address of the single page to flush, or FLUSH_ALL for the
 *	whole TLB.
 *
 * Publishes the request in flush_mm/flush_va/flush_cpumask under
 * tlbstate_lock, kicks the targets via smp_call_function(), then spins
 * until every target has cleared its bit in flush_cpumask (see
 * smp_flush_tlb()).  Callers run with preemption disabled.
 */
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	cpumask_t tmp;

	/* Sanity-check the caller's arguments before publishing anything. */
	BUG_ON(!mm);
	BUG_ON(cpumask_empty(&cpumask));
	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));

	/* Every target must be online. */
	cpumask_and(&tmp, &cpumask, cpu_online_mask);
	BUG_ON(!cpumask_equal(&cpumask, &tmp));

	/* Only one shootdown may be in flight at once; the lock also guards
	 * flush_mm/flush_va against a concurrent initiator.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
	/* Atomically merge the targets into the completion mask. */
	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
#else
#error Not supported.
#endif

	/* Kick the targets; don't wait in smp_call_function() itself - the
	 * handshake below is on flush_cpumask instead.
	 */
	smp_call_function(smp_flush_tlb, NULL, 1);

	/* Spin until every target has acknowledged by clearing its bit. */
	while (!cpumask_empty(&flush_cpumask))
		smp_mb();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
138
139
140
141
142
143void flush_tlb_mm(struct mm_struct *mm)
144{
145 cpumask_t cpu_mask;
146
147 preempt_disable();
148 cpumask_copy(&cpu_mask, mm_cpumask(mm));
149 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
150
151 local_flush_tlb();
152 if (!cpumask_empty(&cpu_mask))
153 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
154
155 preempt_enable();
156}
157
158
159
160
161void flush_tlb_current_task(void)
162{
163 struct mm_struct *mm = current->mm;
164 cpumask_t cpu_mask;
165
166 preempt_disable();
167 cpumask_copy(&cpu_mask, mm_cpumask(mm));
168 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
169
170 local_flush_tlb();
171 if (!cpumask_empty(&cpu_mask))
172 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
173
174 preempt_enable();
175}
176
177
178
179
180
181
182void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
183{
184 struct mm_struct *mm = vma->vm_mm;
185 cpumask_t cpu_mask;
186
187 preempt_disable();
188 cpumask_copy(&cpu_mask, mm_cpumask(mm));
189 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
190
191 local_flush_tlb_page(mm, va);
192 if (!cpumask_empty(&cpu_mask))
193 flush_tlb_others(cpu_mask, mm, va);
194
195 preempt_enable();
196}
197
198
199
200
201
/**
 * do_flush_tlb_all - Per-CPU callback that fully invalidates the local TLB.
 * @unused: Callback context (ignored).
 *
 * Invoked on each CPU by flush_tlb_all() via on_each_cpu().
 */
static void do_flush_tlb_all(void *unused)
{
	local_flush_tlb_all();
}
206
207
208
209
210void flush_tlb_all(void)
211{
212 on_each_cpu(do_flush_tlb_all, 0, 1);
213}
214