1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#ifndef _ASM_GENERIC__TLB_H
16#define _ASM_GENERIC__TLB_H
17
18#include <linux/mmu_notifier.h>
19#include <linux/swap.h>
20#include <asm/pgalloc.h>
21#include <asm/tlbflush.h>
22
23
24
25
26
27
/*
 * Architectures may define nmi_uaccess_okay() to report whether user-space
 * memory may be safely accessed from NMI context; the generic default is
 * "always okay".
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif
31
32#ifdef CONFIG_MMU
33
34#ifdef CONFIG_HAVE_RCU_TABLE_FREE
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63struct mmu_table_batch {
64 struct rcu_head rcu;
65 unsigned int nr;
66 void *tables[0];
67};
68
69#define MAX_TABLE_BATCH \
70 ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
71
72extern void tlb_table_flush(struct mmu_gather *tlb);
73extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
74
75#endif
76
77
78
79
80
/* Number of pages the inline (stack-embedded) batch can hold. */
#define MMU_GATHER_BUNDLE 8

/*
 * One batch of pages gathered for freeing after the TLB flush.  Batches
 * beyond the first are page-sized allocations linked through @next.
 */
struct mmu_gather_batch {
	struct mmu_gather_batch *next;	/* next batch in the chain */
	unsigned int nr;		/* used slots in pages[] */
	unsigned int max;		/* capacity of pages[] */
	/*
	 * C99 flexible array member instead of the deprecated GNU
	 * zero-length array "pages[0]"; sizeof(struct mmu_gather_batch)
	 * is unchanged, so MAX_GATHER_BATCH below is unaffected.
	 */
	struct page *pages[];
};

/* How many page pointers fit in the remainder of one page. */
#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
92
93
94
95
96
97
98
/*
 * Cap on the number of batches chained to one mmu_gather, bounding the
 * total gathered pages to roughly 10000.  NOTE(review): presumably this
 * limits the amount of work done per flush cycle — confirm against the
 * batch-allocation code in mm/memory.c.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
100
101
102
103
/*
 * struct mmu_gather accumulates pages and a virtual-address range while
 * page tables are torn down, so that the TLB flush and the page frees
 * can be batched (see tlb_flush_mmu() and tlb_flush_mmu_tlbonly()).
 */
struct mmu_gather {
	struct mm_struct *mm;		/* address space being operated on */
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch *batch;	/* page-table pages queued for RCU free */
#endif
	unsigned long start;		/* accumulated flush range [start, end) */
	unsigned long end;

	/*
	 * fullmm: set when the whole address space goes away; the flush
	 * range is then pinned to "everything" (see __tlb_reset_range()).
	 * need_flush_all: presumably an arch-side request to flush all —
	 * not set anywhere in this header; confirm semantics with users.
	 */
	unsigned int fullmm : 1,

		need_flush_all : 1;

	struct mmu_gather_batch *active;	/* batch currently being filled */
	struct mmu_gather_batch local;		/* inline first batch header */
	struct page *__pages[MMU_GATHER_BUNDLE];	/* storage backing 'local' */
	unsigned int batch_count;		/* number of batches allocated */
	int page_size;		/* last gathered page size (CONFIG_DEBUG_VM only) */
};
124
/* Tell arch code that the generic mmu_gather implementation is in use. */
#define HAVE_GENERIC_MMU_GATHER

/* Begin gathering for @mm over [start, end); implemented in mm/memory.c. */
void arch_tlb_gather_mmu(struct mmu_gather *tlb,
	struct mm_struct *mm, unsigned long start, unsigned long end);
/* Flush the accumulated TLB range and release the gathered pages. */
void tlb_flush_mmu(struct mmu_gather *tlb);
/* Finish the gather; @force presumably forces a final flush — confirm. */
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
	unsigned long start, unsigned long end, bool force);
/*
 * Queue @page for freeing; returns true when the caller must run
 * tlb_flush_mmu() before queueing more (see tlb_remove_page_size()).
 */
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
134
135static inline void __tlb_adjust_range(struct mmu_gather *tlb,
136 unsigned long address,
137 unsigned int range_size)
138{
139 tlb->start = min(tlb->start, address);
140 tlb->end = max(tlb->end, address + range_size);
141}
142
143static inline void __tlb_reset_range(struct mmu_gather *tlb)
144{
145 if (tlb->fullmm) {
146 tlb->start = tlb->end = ~0;
147 } else {
148 tlb->start = TASK_SIZE;
149 tlb->end = 0;
150 }
151}
152
153static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
154{
155 if (!tlb->end)
156 return;
157
158 tlb_flush(tlb);
159 mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
160 __tlb_reset_range(tlb);
161}
162
163static inline void tlb_remove_page_size(struct mmu_gather *tlb,
164 struct page *page, int page_size)
165{
166 if (__tlb_remove_page_size(tlb, page, page_size))
167 tlb_flush_mmu(tlb);
168}
169
/*
 * Queue a single PAGE_SIZE page; returns true when the caller must run
 * tlb_flush_mmu() before queueing more (see tlb_remove_page_size()).
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
174
175
176
177
178
179static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
180{
181 return tlb_remove_page_size(tlb, page, PAGE_SIZE);
182}
183
#ifndef tlb_remove_check_page_size_change
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
/*
 * Generic fallback hook invoked when the page size being gathered may
 * have changed.  With CONFIG_DEBUG_VM the new size is recorded in the
 * gather (presumably so a mismatch can be caught elsewhere — confirm);
 * otherwise this is a no-op.  Architectures may provide their own.
 */
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
#ifdef CONFIG_DEBUG_VM
	tlb->page_size = page_size;
#endif
}
#endif
199
200
201
202
203
204
/*
 * Hook run before unmapping a VMA; the generic version is a no-op and
 * architectures may override it.
 */
#ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0)
#endif
208
/*
 * Hook run after unmapping a VMA: flush the accumulated TLB range now,
 * unless the whole mm is going away (fullmm), in which case a single
 * flush at the end suffices.
 *
 * Fix: parenthesize the macro argument in "(tlb)->fullmm" — standard
 * macro hygiene so a non-trivial expression argument expands correctly.
 */
#define __tlb_end_vma(tlb, vma)					\
	do {							\
		if (!(tlb)->fullmm)				\
			tlb_flush_mmu_tlbonly(tlb);		\
	} while (0)

#ifndef tlb_end_vma
#define tlb_end_vma	__tlb_end_vma
#endif
218
/*
 * Per-PTE hook used by tlb_remove_tlb_entry(); the generic version is a
 * no-op and architectures may override it to track individual entries.
 */
#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif
222
223
224
225
226
227
228
229
/*
 * tlb_remove_tlb_entry - remember an unmapped PTE for later TLB
 * invalidation: widen the flush range by one page, then let the
 * architecture hook see the entry.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/* As above, but widen the range by the huge page size of hstate @h. */
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	     \
	do {							     \
		__tlb_adjust_range(tlb, address, huge_page_size(h)); \
		__tlb_remove_tlb_entry(tlb, ptep, address);	     \
	} while (0)
241
242
243
244
245
/* Per-PMD hook; generic no-op, architectures may override. */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

/*
 * Remember an unmapped PMD-sized (huge) entry: widen the flush range by
 * HPAGE_PMD_SIZE, then invoke the architecture hook.
 */
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)
255
256
257
258
259
/* Per-PUD hook; generic no-op, architectures may override. */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

/*
 * Remember an unmapped PUD-sized (huge) entry: widen the flush range by
 * HPAGE_PUD_SIZE, then invoke the architecture hook.
 */
#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
/*
 * Free a PTE page-table page covering @address.  NOTE(review): the flush
 * range is only widened by PAGE_SIZE here, not by the span the table
 * mapped — presumably callers account for the wider span separately;
 * confirm before relying on the range being exact.
 */
#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif
295
/* Free a PMD page-table page (same PAGE_SIZE range caveat as pte_free_tlb). */
#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif
303
/* Free a PUD page-table page; skipped on 4-level-hack architectures. */
#ifndef __ARCH_HAS_4LEVEL_HACK
#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
#endif
313
/*
 * Free a P4D page-table page; skipped on 5-level-hack architectures.
 *
 * Fix: the macro parameter was named "pudp", an apparent copy/paste slip
 * from pud_free_tlb above; renamed to "p4dp" to match __p4d_free_tlb().
 * The name is macro-internal, so callers are unaffected.
 */
#ifndef __ARCH_HAS_5LEVEL_HACK
#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, p4dp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__p4d_free_tlb(tlb, p4dp, address);		\
	} while (0)
#endif
#endif
323
324#endif
325
/*
 * No-op in the generic implementation; presumably a hook tied to task
 * migration (judging by the name) — architectures may override.
 */
#define tlb_migrate_finish(mm) do {} while (0)
327
328#endif
329