1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#ifndef _ASM_GENERIC__TLB_H
16#define _ASM_GENERIC__TLB_H
17
18#include <linux/swap.h>
19#include <asm/pgalloc.h>
20#include <asm/tlbflush.h>
21
22#ifdef CONFIG_HAVE_RCU_TABLE_FREE
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51struct mmu_table_batch {
52 struct rcu_head rcu;
53 unsigned int nr;
54 void *tables[0];
55};
56
57#define MAX_TABLE_BATCH \
58 ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
59
60extern void tlb_table_flush(struct mmu_gather *tlb);
61extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
62
63#endif
64
65
66
67
68
/* Capacity of the on-stack bootstrap batch (mmu_gather::__pages). */
#define MMU_GATHER_BUNDLE	8

/*
 * A batch of pages pending TLB flush and release.  Batches beyond the
 * embedded "local" one are dynamically allocated and chained via @next.
 */
struct mmu_gather_batch {
	struct mmu_gather_batch	*next;	/* next overflow batch, or NULL */
	unsigned int		nr;	/* pages[] slots in use */
	unsigned int		max;	/* capacity of pages[] */
	struct page		*pages[];	/* C99 flexible array member (was [0]) */
};

/* How many page pointers fit in the rest of one page. */
#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Cap on the number of batches per mmu_gather: bounds the work to
 * roughly 10000 pages per gather (see batch_count in struct mmu_gather).
 * NOTE(review): presumably this limits flush latency / soft-lockup risk
 * on huge teardowns — confirm against the mm/ implementation.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
88
89
90
91
/*
 * struct mmu_gather - state for one TLB teardown pass.
 * Accumulates the virtual range being unmapped and the pages to be
 * freed once the TLB has been flushed for that range.
 */
struct mmu_gather {
	struct mm_struct *mm;		/* address space being operated on */
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch *batch;	/* page-table pages pending RCU free */
#endif
	unsigned long start;	/* flush range, grown by __tlb_adjust_range() */
	unsigned long end;	/* exclusive end; 0 means "nothing pending" */

	/* fullmm: entire mm is going away; range tracking is skipped
	 * (see __tlb_reset_range()). */
	unsigned int fullmm : 1,
	/* need_flush_all: arch-owned flag forcing a complete TLB flush */
		need_flush_all : 1;

	struct mmu_gather_batch *active;	/* batch currently being filled */
	struct mmu_gather_batch local;		/* embedded first batch ... */
	struct page *__pages[MMU_GATHER_BUNDLE]; /* ... backed by this storage */
	unsigned int batch_count;	/* batches so far, cf. MAX_GATHER_BATCH_COUNT */
	int page_size;			/* page size recorded by the check hook below */
};

/* Advertise that this file supplies the generic mmu_gather machinery. */
#define HAVE_GENERIC_MMU_GATHER
114
/* Begin a gather over [start, end) of @mm. */
void arch_tlb_gather_mmu(struct mmu_gather *tlb,
	struct mm_struct *mm, unsigned long start, unsigned long end);
/* Flush the TLB for the pending range and release all batched pages. */
void tlb_flush_mmu(struct mmu_gather *tlb);
/* End the gather; @force requests a flush even if none seems pending. */
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
	unsigned long start, unsigned long end, bool force);
/* Queue one page; returns true when the caller must tlb_flush_mmu()
 * (see tlb_remove_page_size() below). */
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
	int page_size);
122
123static inline void __tlb_adjust_range(struct mmu_gather *tlb,
124 unsigned long address,
125 unsigned int range_size)
126{
127 tlb->start = min(tlb->start, address);
128 tlb->end = max(tlb->end, address + range_size);
129}
130
131static inline void __tlb_reset_range(struct mmu_gather *tlb)
132{
133 if (tlb->fullmm) {
134 tlb->start = tlb->end = ~0;
135 } else {
136 tlb->start = TASK_SIZE;
137 tlb->end = 0;
138 }
139}
140
141static inline void tlb_remove_page_size(struct mmu_gather *tlb,
142 struct page *page, int page_size)
143{
144 if (__tlb_remove_page_size(tlb, page, page_size))
145 tlb_flush_mmu(tlb);
146}
147
/*
 * Queue a base-page-sized page; propagates __tlb_remove_page_size()'s
 * "caller must flush" indication.
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
152
153
154
155
156
157static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
158{
159 return tlb_remove_page_size(tlb, page, PAGE_SIZE);
160}
161
/*
 * Hook called when the page size of the entries being removed changes.
 * The #ifndef guard lets architectures override it; the generic
 * version only records the size (and only under CONFIG_DEBUG_VM,
 * where struct mmu_gather's page_size field is consulted by debug
 * checks elsewhere).
 */
#ifndef tlb_remove_check_page_size_change
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
	/* Pure bookkeeping; no flush is performed here. */
#ifdef CONFIG_DEBUG_VM
	tlb->page_size = page_size;
#endif
}
#endif
177
178
179
180
181
182
/*
 * tlb_start_vma()/tlb_end_vma() bracket the unmapping of one VMA.
 * Both are overridable by the architecture (#ifndef guards); the
 * generic start hook has nothing to do.
 */
#ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0)
#endif

/*
 * Flush whatever range accumulated for this VMA, then reset tracking.
 * Skipped for a full-mm teardown (tlb->fullmm) and when nothing was
 * accumulated (tlb->end == 0 per __tlb_reset_range()).
 */
#define __tlb_end_vma(tlb, vma) \
	do { \
		if (!tlb->fullmm && tlb->end) { \
			tlb_flush(tlb); \
			__tlb_reset_range(tlb); \
		} \
	} while (0)

#ifndef tlb_end_vma
#define tlb_end_vma __tlb_end_vma
#endif
198
/* Arch hook invoked per cleared PTE; the generic version is a no-op. */
#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/*
 * tlb_remove_tlb_entry - note that a PTE at @address was torn down:
 * widen the pending flush range by one base page and let the
 * architecture record the individual entry.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		__tlb_remove_tlb_entry(tlb, ptep, address); \
	} while (0)

/* Hugetlb variant: the range is widened by huge_page_size(h) instead. */
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
	do { \
		__tlb_adjust_range(tlb, address, huge_page_size(h)); \
		__tlb_remove_tlb_entry(tlb, ptep, address); \
	} while (0)
221
222
223
224
225
/* Arch hook for a cleared huge-PMD entry; generic version is a no-op. */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

/* As tlb_remove_tlb_entry(), but for a PMD-sized huge mapping. */
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
	do { \
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
	} while (0)

/* Arch hook for a cleared huge-PUD entry; generic version is a no-op. */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

/* As tlb_remove_tlb_entry(), but for a PUD-sized huge mapping. */
#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
	do { \
		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \
		__tlb_remove_pud_tlb_entry(tlb, pudp, address); \
	} while (0)
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
/*
 * pXd_free_tlb - free a page-table page of the given level.
 * Each macro marks the gather non-empty via __tlb_adjust_range() and
 * then defers the actual free to the arch-provided __pXd_free_tlb().
 */
#define pte_free_tlb(tlb, ptep, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		__pte_free_tlb(tlb, ptep, address); \
	} while (0)

#define pmd_free_tlb(tlb, pmdp, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		__pmd_free_tlb(tlb, pmdp, address); \
	} while (0)

/* Levels that are folded away by the 4/5-level hacks are not defined here. */
#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		__pud_free_tlb(tlb, pudp, address); \
	} while (0)
#endif

#ifndef __ARCH_HAS_5LEVEL_HACK
/* NOTE(review): parameter is named pudp but a p4d pointer is expected here. */
#define p4d_free_tlb(tlb, pudp, address) \
	do { \
		__tlb_adjust_range(tlb, address, PAGE_SIZE); \
		__p4d_free_tlb(tlb, pudp, address); \
	} while (0)
#endif

/* Generic no-op hook for task migration between CPUs. */
#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */
299