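/*
 * TLB invalidation support for arm64: raw TLBI wrappers and the
 * flush_tlb_*() helpers used by the core MM code.
 */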
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>
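/*
 * Raw TLBI wrappers.
 *
 * __tlbi() emits a bare "tlbi" instruction, with or without an
 * address/ASID operand depending on whether an argument is supplied.
 * When ARM64_WORKAROUND_REPEAT_TLBI is in effect (Qualcomm Falkor
 * erratum 1009), the ALTERNATIVE additionally patches in a "dsb ish"
 * followed by a second, identical TLBI so the invalidation is not lost
 * on affected CPUs.
 */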
#define __TLBI_0(op, arg)	asm ("tlbi " #op "\n"				\
		ALTERNATIVE("nop\n nop",					\
			    "dsb ish\n tlbi " #op,				\
			    ARM64_WORKAROUND_REPEAT_TLBI,			\
			    CONFIG_QCOM_FALKOR_ERRATUM_1009)			\
			    : : )

#define __TLBI_1(op, arg)	asm ("tlbi " #op ", %0\n"			\
		ALTERNATIVE("nop\n nop",					\
			    "dsb ish\n tlbi " #op ", %0",			\
			    ARM64_WORKAROUND_REPEAT_TLBI,			\
			    CONFIG_QCOM_FALKOR_ERRATUM_1009)			\
			    : : "r" (arg))

#define __TLBI_N(op, arg, n, ...)	__TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
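/*
 * Argument-counting dispatch: the trailing "1, 0" selects __TLBI_0 when
 * no operand is given and __TLBI_1 when one is, e.g.
 *
 *	__tlbi(vmalle1is);		-> "tlbi vmalle1is"
 *	__tlbi(vale1is, addr);		-> "tlbi vale1is, %0", addr in %0
 */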
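/*
 * TLB maintenance helpers:
 *
 *	local_flush_tlb_all()		all entries, current CPU only
 *	flush_tlb_all()			all entries, all CPUs (inner shareable)
 *	flush_tlb_mm(mm)		all entries for mm's ASID
 *	flush_tlb_page(vma, uaddr)	single last-level entry for uaddr
 *
 * By-VA/by-ASID operations take (va >> 12) with the ASID in bits [63:48],
 * hence the shifts below.  Page-table updates are published with
 * dsb(ishst)/dsb(nshst) before the TLBI and the invalidation is completed
 * with dsb(ish)/dsb(nsh) afterwards.
 */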
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = ASID(mm) << 48;

	dsb(ishst);
	__tlbi(aside1is, asid);
	dsb(ish);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);

	dsb(ishst);
	__tlbi(vale1is, addr);
	dsb(ish);
}
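/*
 * Range flushes invalidate one page at a time, so ranges larger than
 * MAX_TLB_RANGE (1024 pages) fall back to a full ASID flush via
 * flush_tlb_mm() (or flush_tlb_all() for kernel mappings).  With
 * last_level set, only last-level (leaf) entries are invalidated
 * (vale1is); otherwise intermediate walk-cache entries are dropped as
 * well (vae1is).
 */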
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)

static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     bool last_level)
{
	unsigned long asid = ASID(vma->vm_mm) << 48;
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	start = asid | (start >> 12);
	end = asid | (end >> 12);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
		if (last_level)
			__tlbi(vale1is, addr);
		else
			__tlbi(vae1is, addr);
	}
	dsb(ish);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__flush_tlb_range(vma, start, end, false);
}
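/*
 * Kernel mappings are global rather than tagged with an ASID, so
 * flush_tlb_kernel_range() uses the all-ASID operation (vaae1is).  The
 * trailing isb() ensures the invalidation is visible before subsequent
 * instructions run, e.g. when code will execute from the remapped region.
 */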
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_all();
		return;
	}

	start >>= 12;
	end >>= 12;

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}
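/*
 * __flush_tlb_pgtable() invalidates cached intermediate (walk cache)
 * entries when a page-table page is torn down.  Unlike the helpers
 * above it issues no leading dsb(); the caller is expected to have
 * ordered its page-table update already.
 */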
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
				       unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);

	__tlbi(vae1is, addr);
	dsb(ish);
}

#endif

#endif