#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

/*
 *	Smarter SMP flushing.
 *
 *	Rather than funnelling every remote TLB flush through one global
 *	lock and one shared mm/address slot, NUM_INVALIDATE_TLB_VECTORS
 *	independent invalidate vectors are used.  Each initiating CPU is
 *	assigned one of the vectors (see calculate_tlb_offset()); the
 *	vector received by a target CPU selects the flush_state slot
 *	that carries the mm and the address to invalidate.  CPUs working
 *	on different vectors can therefore flush concurrently without
 *	touching any shared state.
 */

union smp_flush_state {
	struct {
		struct mm_struct *flush_mm;
		unsigned long flush_va;
		raw_spinlock_t tlbstate_lock;
		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
	};
	char pad[INTERNODE_CACHE_BYTES];
} ____cacheline_internodealigned_in_smp;

/*
 * State is put into the per-CPU data section, but padded to a full
 * cache line because other CPUs can access it and we don't want false
 * sharing in the per-CPU data segment.
 */
static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];

static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead we simply clear this CPU from the mm's cpu_vm_mask and
 * switch to swapper_pg_dir.
 */
void leave_mm(int cpu)
{
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	cpumask_clear_cpu(cpu,
			  mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
	load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) clear cpu0's bit in old_mm->cpu_vm_mask.
 *	This stops flush IPIs for the old mm.  It is not synchronized
 *	with the other CPUs, but smp_invalidate_interrupt() ignores
 *	flush IPIs for the wrong mm, so in the worst case we perform a
 *	superfluous TLB flush.
 * 1a2) set cpu_tlbstate to TLBSTATE_OK.
 *	Now smp_invalidate_interrupt() won't call leave_mm() if cpu0
 *	was in lazy TLB mode.
 * 1a3) update cpu_tlbstate.active_mm.
 *	Now cpu0 accepts TLB flushes for the new mm.
 * 1a4) set cpu0's bit in new_mm->cpu_vm_mask.
 *	Now the other CPUs will send TLB flush IPIs.
 * 1b) thread switch without mm change
 *	cpu_tlbstate.active_mm is correct; cpu0 already handles flush
 *	IPIs for this mm.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK.
 * 1b2) test_and_set cpu0's bit in cpu_vm_mask.
 *	Atomically set the bit (other CPUs start sending flush IPIs)
 *	and test its old value.
 * 1b3) if the bit was 0: leave_mm() was called, flush the TLB.
 * 2) switch %esp, i.e. current.
 *
 * The interrupt handler must cope with two special cases:
 * - CR3 is changed before %esp, so it cannot rely on
 *   current->{active_,}mm being up to date.
 * - the CPU performs speculative TLB reads; even if it only runs in
 *   kernel space it can load TLB entries for user space pages.
 *
 * The good news is that cpu_tlbstate is local to each CPU, so there
 * are no write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the TLB entries if the CPU uses the mm that's being flushed.
 * 2) Leave the mm if we are in lazy TLB mode.
 *
 * Interrupts are disabled.
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_ax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
		/*
		 * A flush IPI can be redelivered after this CPU has
		 * already handled it and cleared its bit, so a cleared
		 * bit is not a bug: just acknowledge the interrupt and
		 * return.
		 */

	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}
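
/*
 * Publish mm/va in this CPU's flush_state slot, send the slot's
 * invalidate vector as an IPI to every other CPU in @cpumask, then
 * spin until each target has cleared itself from flush_cpumask,
 * i.e. until all remote flushes have completed.
 */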
static void flush_tlb_others_ipi(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long va)
{
	unsigned int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = this_cpu_read(tlb_vector_offset);
	f = &flush_state[sender];

	/*
	 * With more CPUs than invalidate vectors, several CPUs may
	 * share this slot, so its contents must be protected by the
	 * slot lock.
	 */
	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask,
			   cpumask_of(smp_processor_id()))) {
		/*
		 * We have to send the IPI only to
		 * CPUs affected.
		 */
		apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
			      INVALIDATE_TLB_VECTOR_START + sender);

		while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
			cpu_relax();
	}

	f->flush_mm = NULL;
	f->flush_va = 0;
	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_unlock(&f->tlbstate_lock);
}
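
/*
 * Default flush_tlb_others implementation.  On SGI UV systems the
 * Broadcast Assist Unit can invalidate remote TLBs without IPIs;
 * uv_flush_tlb_others() returns the subset of CPUs it could not cover
 * (or NULL if none), and only that subset falls back to the IPI path.
 */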
void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long va)
{
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
		if (cpumask)
			flush_tlb_others_ipi(cpumask, mm, va);
		return;
	}
	flush_tlb_others_ipi(cpumask, mm, va);
}
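
/*
 * Assign each CPU its tlb_vector_offset.  Vectors are handed out per
 * node so that, when enough vectors are available, flushes initiated
 * on different nodes use different flush_state slots.
 */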
static void __cpuinit calculate_tlb_offset(void)
{
	int cpu, node, nr_node_vecs, idx = 0;

	/*
	 * tlb_vector_offset is rewritten at runtime on CPU hotplug,
	 * which is safe because each per-CPU write is atomic.  If there
	 * are more online nodes than vectors, each node gets a single
	 * vector (nodes then wrap around modulo
	 * NUM_INVALIDATE_TLB_VECTORS); otherwise every node gets an
	 * equal share of the vectors.
	 */
	if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS)
		nr_node_vecs = 1;
	else
		nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes;

	for_each_online_node(node) {
		int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
			nr_node_vecs;
		int cpu_offset = 0;
		for_each_cpu(cpu, cpumask_of_node(node)) {
			per_cpu(tlb_vector_offset, cpu) = node_offset +
				cpu_offset;
			cpu_offset++;
			cpu_offset = cpu_offset % nr_node_vecs;
		}
		idx++;
	}
}
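
/*
 * Rebalance the vector layout whenever a CPU comes or goes.  Masking
 * the action with 0xf strips CPU_TASKS_FROZEN, so the frozen
 * (suspend/resume) variants of CPU_ONLINE and CPU_DEAD are handled as
 * well.
 */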
static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
		unsigned long action, void *hcpu)
{
	switch (action & 0xf) {
	case CPU_ONLINE:
	case CPU_DEAD:
		calculate_tlb_offset();
	}
	return NOTIFY_OK;
}
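
/*
 * One-time setup: initialize the per-vector locks, compute the initial
 * vector layout and register for CPU hotplug updates.
 */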
static int __cpuinit init_smp_flush(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(flush_state); i++)
		raw_spin_lock_init(&flush_state[i].tlbstate_lock);

	calculate_tlb_offset();
	hotcpu_notifier(tlb_cpuhp_notify, 0);
	return 0;
}
core_initcall(init_smp_flush);
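
/*
 * Flush all user mappings of the current task's mm: locally right
 * away, and on every other CPU that has the mm in its cpu_vm_mask.
 * cpumask_any_but() is a cheap "is any CPU besides us using this mm?"
 * test.
 */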
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	local_flush_tlb();
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
	preempt_enable();
}
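
/*
 * Flush @mm on all CPUs.  If we are running with @mm active we flush
 * locally; if we merely hold it as a lazy TLB reference (kernel
 * thread, current->mm == NULL) we drop out of it with leave_mm()
 * instead of flushing.
 */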
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);

	preempt_enable();
}
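
/*
 * Single-page variant: invalidate just @va, locally with
 * __flush_tlb_one() (INVLPG where available) and remotely by passing
 * the address in the flush IPI instead of TLB_FLUSH_ALL.
 */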
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, va);

	preempt_enable();
}
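
/*
 * flush_tlb_all() runs do_flush_tlb_all() on every CPU.  Unlike the
 * mm-scoped flushes above, __flush_tlb_all() also drops global pages
 * (by toggling CR4.PGE where global pages are supported), and CPUs
 * found in lazy TLB mode take the opportunity to leave their borrowed
 * mm.
 */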
static void do_flush_tlb_all(void *info)
{
	__flush_tlb_all();
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}