1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#ifndef _ASM_GENERIC__TLB_H
16#define _ASM_GENERIC__TLB_H
17
18#include <linux/mmu_notifier.h>
19#include <linux/swap.h>
20#include <asm/pgalloc.h>
21#include <asm/tlbflush.h>
22
23#ifdef CONFIG_MMU
24
25#ifdef CONFIG_HAVE_RCU_TABLE_FREE
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54struct mmu_table_batch {
55 struct rcu_head rcu;
56 unsigned int nr;
57 void *tables[0];
58};
59
60#define MAX_TABLE_BATCH \
61 ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
62
63extern void tlb_table_flush(struct mmu_gather *tlb);
64extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
65
66#endif
67
68
69
70
71
/* Capacity of the inline __pages[] array embedded in struct mmu_gather. */
#define MMU_GATHER_BUNDLE	8

/*
 * One batch of pages gathered for freeing.  Batches form a singly
 * linked list via @next; @nr counts the used slots and @max is
 * presumably the capacity of pages[] -- confirm against the allocator
 * in the mmu_gather implementation.
 *
 * pages[] is a C99 flexible array member (replaces the deprecated GNU
 * zero-length array pages[0]; sizeof() is unchanged, so
 * MAX_GATHER_BATCH below is unaffected).
 */
struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[];
};

/* Page pointers that fit in the remainder of one page. */
#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
83
/*
 * Cap on the number of batches chained per mmu_gather: bounds the total
 * at roughly 10000 pages.  NOTE(review): the cap is presumably enforced
 * by the batch allocator in the mmu_gather implementation (likely to
 * bound the time spent in one flush) -- confirm in mm/memory.c.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
91
/*
 * struct mmu_gather holds the state of one gather-and-flush operation:
 * the mm being unmapped, the virtual range accumulated so far, flags
 * recording what was cleared, and the batches of pages to be freed.
 */
struct mmu_gather {
	struct mm_struct *mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch *batch;
#endif
	/* Flush range; kept by __tlb_adjust_range()/__tlb_reset_range(). */
	unsigned long start;
	unsigned long end;

	/*
	 * Tearing down the entire address space: per-VMA flushes are
	 * skipped (see __tlb_end_vma()) and the range is set to ~0
	 * (see __tlb_reset_range()).
	 */
	unsigned int fullmm : 1;

	/*
	 * NOTE(review): not referenced in this header -- presumably set
	 * when arch code requires a complete TLB flush; confirm in the
	 * mmu_gather implementation.
	 */
	unsigned int need_flush_all : 1;

	/* Page-table pages were freed (set by the pXX_free_tlb() macros). */
	unsigned int freed_tables : 1;

	/*
	 * Which page-table levels had entries cleared; consumed by
	 * tlb_get_unmap_shift() to pick the flush granularity.
	 */
	unsigned int cleared_ptes : 1;
	unsigned int cleared_pmds : 1;
	unsigned int cleared_puds : 1;
	unsigned int cleared_p4ds : 1;

	struct mmu_gather_batch *active;	/* batch currently being filled */
	struct mmu_gather_batch local;		/* inline first batch */
	/* presumably the backing store for local.pages[] -- note adjacency */
	struct page *__pages[MMU_GATHER_BUNDLE];
	unsigned int batch_count;
	/* only written under CONFIG_DEBUG_VM; see tlb_remove_check_page_size_change() */
	int page_size;
};
133
/* This header provides the generic mmu_gather implementation. */
#define HAVE_GENERIC_MMU_GATHER

/* Initialize @tlb to gather over @mm for the given virtual range. */
void arch_tlb_gather_mmu(struct mmu_gather *tlb,
	struct mm_struct *mm, unsigned long start, unsigned long end);
/* Flush the gathered TLB range and release the gathered pages. */
void tlb_flush_mmu(struct mmu_gather *tlb);
/* Finish the gather; @force semantics are defined by the implementation. */
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
	unsigned long start, unsigned long end, bool force);
void tlb_flush_mmu_free(struct mmu_gather *tlb);
/*
 * Queue @page for freeing; returns true when a flush is required --
 * callers then invoke tlb_flush_mmu() (see tlb_remove_page_size()).
 */
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
	int page_size);
144
145static inline void __tlb_adjust_range(struct mmu_gather *tlb,
146 unsigned long address,
147 unsigned int range_size)
148{
149 tlb->start = min(tlb->start, address);
150 tlb->end = max(tlb->end, address + range_size);
151}
152
153static inline void __tlb_reset_range(struct mmu_gather *tlb)
154{
155 if (tlb->fullmm) {
156 tlb->start = tlb->end = ~0;
157 } else {
158 tlb->start = TASK_SIZE;
159 tlb->end = 0;
160 }
161 tlb->freed_tables = 0;
162 tlb->cleared_ptes = 0;
163 tlb->cleared_pmds = 0;
164 tlb->cleared_puds = 0;
165 tlb->cleared_p4ds = 0;
166}
167
/*
 * Flush only the TLB (no page freeing): invalidate the gathered range in
 * hardware, notify MMU notifier listeners, then reset the range.  The
 * sequence is flush -> notify -> reset, in that order.
 */
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/* end == 0 means nothing was gathered (see __tlb_reset_range()) */
	if (!tlb->end)
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}
177
178static inline void tlb_remove_page_size(struct mmu_gather *tlb,
179 struct page *page, int page_size)
180{
181 if (__tlb_remove_page_size(tlb, page, page_size))
182 tlb_flush_mmu(tlb);
183}
184
185static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
186{
187 return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
188}
189
190
191
192
193
194static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
195{
196 return tlb_remove_page_size(tlb, page, PAGE_SIZE);
197}
198
#ifndef tlb_remove_check_page_size_change
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
/*
 * Note the page size of the entries about to be removed.  Architectures
 * that must react to a size change override this (hence the #ifndef);
 * the generic version only records the size, and only in debug builds.
 */
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
	/*
	 * page_size is stored solely under CONFIG_DEBUG_VM -- presumably
	 * for consistency checking elsewhere; confirm in mm/memory.c.
	 */
#ifdef CONFIG_DEBUG_VM
	tlb->page_size = page_size;
#endif
}
#endif
214
215static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
216{
217 if (tlb->cleared_ptes)
218 return PAGE_SHIFT;
219 if (tlb->cleared_pmds)
220 return PMD_SHIFT;
221 if (tlb->cleared_puds)
222 return PUD_SHIFT;
223 if (tlb->cleared_p4ds)
224 return P4D_SHIFT;
225
226 return PAGE_SHIFT;
227}
228
/* Flush granularity in bytes: 2^shift for the level chosen above. */
static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	unsigned long shift = tlb_get_unmap_shift(tlb);

	return 1UL << shift;
}
233
/*
 * tlb_start_vma()/tlb_end_vma() bracket the unmapping of one VMA.
 * The generic tlb_start_vma() is a no-op; architectures may override
 * either macro (hence the #ifndef guards).
 */
#ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0)
#endif

/*
 * Generic tlb_end_vma(): flush the range gathered for this VMA now,
 * unless this is a full-mm teardown (fullmm), where per-VMA flushing
 * is skipped.
 */
#define __tlb_end_vma(tlb, vma) \
	do { \
		if (!tlb->fullmm) \
			tlb_flush_mmu_tlbonly(tlb); \
	} while (0)

#ifndef tlb_end_vma
#define tlb_end_vma __tlb_end_vma
#endif
252
/* Arch hook invoked per removed PTE; no-op unless overridden. */
#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/*
 * tlb_remove_tlb_entry - remember a PTE unmapping for later TLB
 * invalidation: grow the gathered range by one page at @address, flag
 * the PTE level as cleared, and call the arch hook above.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		tlb->cleared_ptes = 1; \
		__tlb_remove_tlb_entry(tlb, ptep, address); \
	} while (0)
270
/*
 * Hugetlb variant: grow the range by the huge page size of @h, and flag
 * the PMD or PUD level as cleared depending on that size (other sizes
 * set no level flag).
 */
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
	do { \
		unsigned long _sz = huge_page_size(h); \
		__tlb_adjust_range(tlb, address, _sz); \
		if (_sz == PMD_SIZE) \
			tlb->cleared_pmds = 1; \
		else if (_sz == PUD_SIZE) \
			tlb->cleared_puds = 1; \
		__tlb_remove_tlb_entry(tlb, ptep, address); \
	} while (0)
281
/*
 * tlb_remove_pmd_tlb_entry - remember clearing a PMD-level (huge page)
 * mapping for later TLB invalidation.  The arch hook is a no-op unless
 * overridden.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
	do { \
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \
		tlb->cleared_pmds = 1; \
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
	} while (0)
296
/*
 * tlb_remove_pud_tlb_entry - remember clearing a PUD-level (huge page)
 * mapping for later TLB invalidation.  The arch hook is a no-op unless
 * overridden.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
	do { \
		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \
		tlb->cleared_puds = 1; \
		__tlb_remove_pud_tlb_entry(tlb, pudp, address); \
	} while (0)
311
/*
 * The pXX_free_tlb() macros free page-table pages themselves (not the
 * pages they map).  Each grows the gathered range, sets freed_tables,
 * and flags the *parent* level as cleared -- freeing a PTE page means
 * the PMD entry that pointed at it was cleared, and so on.  The arch
 * supplies __pXX_free_tlb(); the #ifndef guards allow full overrides.
 */
#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		tlb->freed_tables = 1; \
		tlb->cleared_pmds = 1; \
		__pte_free_tlb(tlb, ptep, address); \
	} while (0)
#endif
339
/* Free a PMD page: the PUD entry pointing at it was cleared. */
#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		tlb->freed_tables = 1; \
		tlb->cleared_puds = 1; \
		__pmd_free_tlb(tlb, pmdp, address); \
	} while (0)
#endif
349
/*
 * Free a PUD page: the P4D entry pointing at it was cleared.  Not
 * defined for architectures using the 4-level folding hack.
 */
#ifndef __ARCH_HAS_4LEVEL_HACK
#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		tlb->freed_tables = 1; \
		tlb->cleared_p4ds = 1; \
		__pud_free_tlb(tlb, pudp, address); \
	} while (0)
#endif
#endif
361
/*
 * Free a P4D page.  Note no cleared_* flag is set here -- there is no
 * bit for the PGD level in struct mmu_gather.  Not defined for
 * architectures using the 5-level folding hack.
 */
#ifndef __ARCH_HAS_5LEVEL_HACK
#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		tlb->freed_tables = 1; \
		__p4d_free_tlb(tlb, pudp, address); \
	} while (0)
#endif
#endif
372
373#endif
/*
 * No-op in the generic version; presumably an arch hook invoked when a
 * task migration completes -- confirm at the call site in the scheduler.
 */
#define tlb_migrate_finish(mm) do {} while (0)
376
377#endif
378