1#include <linux/init.h>
2
3#include <linux/mm.h>
4#include <linux/spinlock.h>
5#include <linux/smp.h>
6#include <linux/interrupt.h>
7#include <linux/export.h>
8#include <linux/cpu.h>
9
10#include <asm/tlbflush.h>
11#include <asm/mmu_context.h>
12#include <asm/cache.h>
13#include <asm/apic.h>
14#include <asm/uv/uv.h>
15#include <linux/debugfs.h>
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31#ifdef CONFIG_SMP
32
/*
 * Argument block handed to the TLB-shootdown IPI workers
 * (flush_tlb_func(), do_kernel_range_flush()) describing what to flush.
 */
struct flush_tlb_info {
	struct mm_struct *flush_mm;	/* target mm; NULL = flush regardless of active mm */
	unsigned long flush_start;	/* first address of the range */
	unsigned long flush_end;	/* one past the range, or TLB_FLUSH_ALL */
};
38
39
40
41
42
/*
 * Detach @cpu from the mm it is lazily borrowing: clear the CPU from the
 * mm's cpumask (so it stops receiving flush IPIs for it) and switch CR3
 * to the kernel page tables.
 *
 * Must only be called while in lazy TLB mode (TLBSTATE_LAZY); calling it
 * with TLBSTATE_OK is a bug.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		/*
		 * This can be called from the idle path, where RCU may be
		 * inactive; use the _rcuidle tracepoint variant so tracing
		 * is safe there.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);
61
62#endif
63
/*
 * Switch this CPU's address space from @prev to @next on behalf of @tsk.
 *
 * Interrupt-safe wrapper: masks local interrupts around the switch and
 * delegates the actual work to switch_mm_irqs_off().
 */
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(irqflags);
}
73
/*
 * Core mm switch, caller must have interrupts disabled.
 *
 * Two cases:
 *  - prev != next: real switch — publish the new state, set ourselves in
 *    next's cpumask, reload CR3, then stop flush IPIs for prev.
 *  - prev == next (SMP only): we may be leaving lazy TLB mode; if
 *    leave_mm() dropped us from the cpumask we must reload CR3 because
 *    we missed flush IPIs while detached.
 */
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
			/*
			 * If our current stack is in vmalloc space and isn't
			 * mapped in the new pgd, we'll double-fault.  Forcibly
			 * map it.
			 */
			unsigned int stack_pgd_index = pgd_index(current_stack_pointer());

			pgd_t *pgd = next->pgd + stack_pgd_index;

			if (unlikely(pgd_none(*pgd)))
				set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
		}

#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif

		cpumask_set_cpu(cpu, mm_cpumask(next));

		/*
		 * Re-load page tables.
		 *
		 * Ordering constraint: the store to mm_cpumask above must be
		 * ordered before any TLB fill from next->pgd.  Otherwise a
		 * remote CPU could write a PTE for next, observe our cpumask
		 * bit still clear, skip the flush IPI, and leave us with a
		 * stale TLB entry.  TLB fills can happen at any time (e.g.
		 * instruction fetch) and are not ordered by LOCK or MFENCE;
		 * fortunately load_cr3() is serializing and provides the
		 * required full barrier.
		 */
		load_cr3(next->pgd);

		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush IPIs for the previous mm. */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load per-mm CR4 state. */
		load_mm_cr4(next);

#ifdef CONFIG_MODIFY_LDT_SYSCALL
		/*
		 * Load the LDT only if it differs.
		 *
		 * NOTE(review): prev->context.ldt may not match the LDT
		 * register if leave_mm(prev) ran and modify_ldt() then
		 * changed prev's LDT while suppressing the IPI to this CPU;
		 * since mms don't appear to share LDTs, comparing the two
		 * pointers suffices — confirm against modify_ldt paths.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_mm_ldt(next);
#endif
	}
#ifdef CONFIG_SMP
	else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * We were removed from the cpumask by leave_mm()
			 * (lazy TLB mode) and stopped receiving flush IPIs,
			 * so our TLB and CR3 may reference freed page tables.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));

			/*
			 * Reload CR3 to drop any stale translations.  As
			 * above, load_cr3() is serializing and orders TLB
			 * fills after the mm_cpumask store.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_mm_cr4(next);
			load_mm_ldt(next);
		}
	}
#endif
}
186
187#ifdef CONFIG_SMP
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
/*
 * TLB-shootdown IPI handler: flush the range described by *info (a
 * struct flush_tlb_info) on this CPU.  Runs in interrupt context on
 * each CPU targeted by native_flush_tlb_others().
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	/* NULL flush_mm means "flush no matter which mm is active". */
	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			/* Partial flush: one INVLPG per page in the range. */
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;
			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else
		/* Lazy TLB mode: detach from the mm instead of flushing. */
		leave_mm(smp_processor_id());

}
259
/*
 * Ask the CPUs in @cpumask to flush [start, end) of @mm by running
 * flush_tlb_func() on each of them (synchronously).  end == TLB_FLUSH_ALL
 * requests a full flush; end == 0 is shorthand for the single page at
 * @start.  On UV systems the BAU hardware may shrink (or fully absorb)
 * the target mask first.
 */
void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info;

	/* end == 0 means "just the one page at start". */
	if (end == 0)
		end = start + PAGE_SIZE;
	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(end - start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		/* uv_flush_tlb_others() returns the CPUs still needing an IPI. */
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
								&info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}
291
/*
 * Flush all TLB entries for the current task's mm: locally, and via IPI
 * on every other CPU that has this mm in its cpumask.
 */
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);

	/* This is an implicit full barrier that synchronizes with switch_mm. */
	local_flush_tlb();

	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	/* Only send IPIs if some other CPU also has this mm loaded. */
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}
308
309
310
311
312
313
314
315
316
317
318
/*
 * Upper bound, in pages, on flushing a range one INVLPG at a time;
 * larger ranges (see flush_tlb_mm_range()/flush_tlb_kernel_range())
 * fall back to a full TLB flush instead.  Tunable at runtime through
 * the tlb_single_page_flush_ceiling debugfs file below.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
320
/*
 * Flush [start, end) of @mm on this CPU and (always, via the out: path)
 * on any other CPU that has the mm loaded.  @vmflag: VM_HUGETLB forces
 * a full flush since huge mappings can't be flushed page-by-page here.
 * end == TLB_FLUSH_ALL requests a full flush.
 */
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* Default to a full flush; narrowed below when the range is small. */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm) {
		/* Synchronize with switch_mm; nothing to flush locally. */
		smp_mb();

		goto out;
	}

	if (!current->mm) {
		/* Kernel thread borrowing the mm lazily: detach instead. */
		leave_mm(smp_processor_id());

		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	/*
	 * Both branches below are implicit full barriers (CR3 write or
	 * INVLPG) that synchronize with switch_mm.
	 */
	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* Flush the range one INVLPG at a time. */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	/* Tell remote CPUs the same thing we did locally. */
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}
373
/*
 * Flush the single user page at @start belonging to @vma's mm, locally
 * and on every other CPU that has the mm loaded.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm) {
			/*
			 * Implicit full barrier (INVLPG) that synchronizes
			 * with switch_mm.
			 */
			__flush_tlb_one(start);
		} else {
			/* Lazy TLB mode: detach rather than flush. */
			leave_mm(smp_processor_id());

			/* Synchronize with switch_mm. */
			smp_mb();
		}
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}
400
/*
 * Per-CPU worker for flush_tlb_all(): flush this CPU's entire TLB and,
 * if we were only lazily borrowing an mm, drop it for good.
 */
static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}
408
/* Flush the entire TLB on every CPU (runs do_flush_tlb_all everywhere). */
void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}
414
415static void do_kernel_range_flush(void *info)
416{
417 struct flush_tlb_info *f = info;
418 unsigned long addr;
419
420
421 for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
422 __flush_tlb_single(addr);
423}
424
425void flush_tlb_kernel_range(unsigned long start, unsigned long end)
426{
427
428
429 if (end == TLB_FLUSH_ALL ||
430 (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
431 on_each_cpu(do_flush_tlb_all, NULL, 1);
432 } else {
433 struct flush_tlb_info info;
434 info.flush_start = start;
435 info.flush_end = end;
436 on_each_cpu(do_kernel_range_flush, &info, 1);
437 }
438}
439
440static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
441 size_t count, loff_t *ppos)
442{
443 char buf[32];
444 unsigned int len;
445
446 len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
447 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
448}
449
450static ssize_t tlbflush_write_file(struct file *file,
451 const char __user *user_buf, size_t count, loff_t *ppos)
452{
453 char buf[32];
454 ssize_t len;
455 int ceiling;
456
457 len = min(count, sizeof(buf) - 1);
458 if (copy_from_user(buf, user_buf, len))
459 return -EFAULT;
460
461 buf[len] = '\0';
462 if (kstrtoint(buf, 0, &ceiling))
463 return -EINVAL;
464
465 if (ceiling < 0)
466 return -EINVAL;
467
468 tlb_single_page_flush_ceiling = ceiling;
469 return count;
470}
471
/* debugfs file ops exposing tlb_single_page_flush_ceiling for read/write. */
static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};
477
/*
 * Create the "tlb_single_page_flush_ceiling" file (owner read/write)
 * under arch_debugfs_dir so the flush ceiling can be tuned at runtime.
 */
static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
485
486#endif
487