1
2
3
4
5
6
7
8
9
10#include <linux/preempt.h>
11#include <linux/smp.h>
12
13#include <asm/smp_plat.h>
14#include <asm/tlbflush.h>
15
16
17
18
19
20
/*
 * Argument bundle passed by pointer to the ipi_flush_tlb_* handlers,
 * since the cross-CPU call interface only carries a single void *.
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;	/* VMA for user-space flushes */
	unsigned long ta_start;		/* first address (or single page) */
	unsigned long ta_end;		/* one past the last address, for range ops */
};
26
/* IPI handler: flush the entire TLB on the receiving CPU. Argument unused. */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}
31
/* IPI handler: flush all TLB entries for the given mm on this CPU. */
static inline void ipi_flush_tlb_mm(void *arg)
{
	local_flush_tlb_mm((struct mm_struct *)arg);
}
38
39static inline void ipi_flush_tlb_page(void *arg)
40{
41 struct tlb_args *ta = (struct tlb_args *)arg;
42
43 local_flush_tlb_page(ta->ta_vma, ta->ta_start);
44}
45
46static inline void ipi_flush_tlb_kernel_page(void *arg)
47{
48 struct tlb_args *ta = (struct tlb_args *)arg;
49
50 local_flush_tlb_kernel_page(ta->ta_start);
51}
52
53static inline void ipi_flush_tlb_range(void *arg)
54{
55 struct tlb_args *ta = (struct tlb_args *)arg;
56
57 local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
58}
59
60static inline void ipi_flush_tlb_kernel_range(void *arg)
61{
62 struct tlb_args *ta = (struct tlb_args *)arg;
63
64 local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
65}
66
67void flush_tlb_all(void)
68{
69 if (tlb_ops_need_broadcast())
70 on_each_cpu(ipi_flush_tlb_all, NULL, 1);
71 else
72 local_flush_tlb_all();
73}
74
75void flush_tlb_mm(struct mm_struct *mm)
76{
77 if (tlb_ops_need_broadcast())
78 on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
79 else
80 local_flush_tlb_mm(mm);
81}
82
83void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
84{
85 if (tlb_ops_need_broadcast()) {
86 struct tlb_args ta;
87 ta.ta_vma = vma;
88 ta.ta_start = uaddr;
89 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
90 &ta, 1);
91 } else
92 local_flush_tlb_page(vma, uaddr);
93}
94
95void flush_tlb_kernel_page(unsigned long kaddr)
96{
97 if (tlb_ops_need_broadcast()) {
98 struct tlb_args ta;
99 ta.ta_start = kaddr;
100 on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
101 } else
102 local_flush_tlb_kernel_page(kaddr);
103}
104
105void flush_tlb_range(struct vm_area_struct *vma,
106 unsigned long start, unsigned long end)
107{
108 if (tlb_ops_need_broadcast()) {
109 struct tlb_args ta;
110 ta.ta_vma = vma;
111 ta.ta_start = start;
112 ta.ta_end = end;
113 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,
114 &ta, 1);
115 } else
116 local_flush_tlb_range(vma, start, end);
117}
118
119void flush_tlb_kernel_range(unsigned long start, unsigned long end)
120{
121 if (tlb_ops_need_broadcast()) {
122 struct tlb_args ta;
123 ta.ta_start = start;
124 ta.ta_end = end;
125 on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
126 } else
127 local_flush_tlb_kernel_range(start, end);
128}
129
130