1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#ifndef _ASM_GENERIC__TLB_H
16#define _ASM_GENERIC__TLB_H
17
18#include <linux/swap.h>
19#include <asm/pgalloc.h>
20#include <asm/tlbflush.h>
21
22#ifdef CONFIG_HAVE_RCU_TABLE_FREE
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51struct mmu_table_batch {
52 struct rcu_head rcu;
53 unsigned int nr;
54 void *tables[0];
55};
56
/*
 * Number of table pointers that fit in one page alongside the
 * mmu_table_batch header, so a whole batch occupies exactly PAGE_SIZE.
 */
#define MAX_TABLE_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

/* Flush any pending RCU table batch (implemented out of line). */
extern void tlb_table_flush(struct mmu_gather *tlb);
/* Queue a page-table page for RCU-deferred freeing. */
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
62
63#endif
64
65
66
67
68
/*
 * Number of page pointers stored inline in struct mmu_gather (its
 * __pages[] bootstrap array) before separate batch pages are allocated.
 */
#define MMU_GATHER_BUNDLE 8

/*
 * struct mmu_gather_batch - a chunk of pointers to pages that are pending
 * release once the TLB has been flushed.
 *
 * @next:  next batch in the chain (batches are allocated one page at a
 *         time and linked together).
 * @nr:    number of entries currently used in @pages.
 * @max:   capacity of @pages for this batch.
 * @pages: the pages to release; a C99 flexible array member (replaces the
 *         GNU zero-length array [0]; identical layout and sizeof, but
 *         standard-conforming).
 */
struct mmu_gather_batch {
	struct mmu_gather_batch *next;
	unsigned int nr;
	unsigned int max;
	struct page *pages[];
};

/*
 * Number of page pointers that fit in one page alongside the
 * mmu_gather_batch header.
 */
#define MAX_GATHER_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
80
81
82
83
84
85
86
87#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
88
89
90
91
/*
 * struct mmu_gather - per-unmap state that accumulates the virtual range to
 * flush from the TLB and the pages to free once the flush is done.
 */
struct mmu_gather {
	struct mm_struct *mm;		/* address space being torn down */
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch *batch;	/* pending RCU table-free batch */
#endif
	unsigned long start;		/* lowest address queued for flush */
	unsigned long end;		/* one past highest address queued */
	/*
	 * fullmm: entire address space is going away (see __tlb_reset_range(),
	 * which then marks the range as "everything" with ~0).
	 */
	unsigned int fullmm : 1,
	/*
	 * need_flush_all: force a full flush regardless of the accumulated
	 * range — NOTE(review): set/consumed outside this chunk, confirm.
	 */
		need_flush_all : 1;

	struct mmu_gather_batch *active;	/* batch currently being filled */
	struct mmu_gather_batch local;		/* inline bootstrap batch ... */
	struct page *__pages[MMU_GATHER_BUNDLE];/* ...backed by this array */
	unsigned int batch_count;		/* batches allocated so far */
	/*
	 * addr: last address passed to __tlb_adjust_range(); used to re-prime
	 * the range after tlb_flush_mmu() resets it (tlb_remove_page_size()).
	 */
	unsigned long addr;
	int page_size;				/* size of pages being removed */
};
117
/* Advertise that this header supplies the generic mmu_gather machinery. */
#define HAVE_GENERIC_MMU_GATHER

/* Initialise @tlb for gathering over [start, end) of @mm. */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
/* Flush the accumulated TLB range and release all batched pages. */
void tlb_flush_mmu(struct mmu_gather *tlb);
/* Final flush and teardown of @tlb covering [start, end). */
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
							unsigned long end);
/*
 * Queue @page for release; returns true when it could NOT be queued (the
 * caller must flush and retry — see tlb_remove_page_size()).
 */
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
126
127static inline void __tlb_adjust_range(struct mmu_gather *tlb,
128 unsigned long address)
129{
130 tlb->start = min(tlb->start, address);
131 tlb->end = max(tlb->end, address + PAGE_SIZE);
132
133
134
135
136
137 tlb->addr = address;
138}
139
140static inline void __tlb_reset_range(struct mmu_gather *tlb)
141{
142 if (tlb->fullmm) {
143 tlb->start = tlb->end = ~0;
144 } else {
145 tlb->start = TASK_SIZE;
146 tlb->end = 0;
147 }
148}
149
/*
 * Queue @page (of @page_size bytes) for release after the TLB flush.
 *
 * __tlb_remove_page_size() returns true when the page could NOT be queued
 * (current batch is full): flush everything, then restore the page size and
 * flush range — tlb_flush_mmu() reset both — and retry, which must succeed
 * on the fresh batch.
 */
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size)) {
		tlb_flush_mmu(tlb);
		/* re-establish state that the flush just reset */
		tlb->page_size = page_size;
		__tlb_adjust_range(tlb, tlb->addr);
		__tlb_remove_page_size(tlb, page, page_size);
	}
}
160
161static bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
162{
163 return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
164}
165
166
167
168
169
170static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
171{
172 return tlb_remove_page_size(tlb, page, PAGE_SIZE);
173}
174
/*
 * Queue a PTE page for release.  Callers must have flushed first: the
 * active batch has to be empty (the VM_BUG_ON_PAGE below enforces this).
 * Re-primes page_size and the flush range from the last recorded address
 * before queueing, mirroring the retry path in tlb_remove_page_size().
 */
static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page)
{
	/* active batch must be empty — caller flushed before getting here */
	VM_BUG_ON_PAGE(tlb->active->nr, page);
	tlb->page_size = PAGE_SIZE;
	__tlb_adjust_range(tlb, tlb->addr);
	return __tlb_remove_page(tlb, page);
}
183
184
185
186
187
188
/*
 * Per-arch hook run before unmapping a VMA's pages; the generic version is
 * a no-op.  Architectures override by defining tlb_start_vma before
 * including this header.
 */
#ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0)
#endif

/*
 * Generic end-of-VMA handling: flush the accumulated range unless this is
 * a full-mm teardown (flushed once at the very end) or nothing was queued
 * (tlb->end == 0 means an empty range, see __tlb_reset_range()).
 */
#define __tlb_end_vma(tlb, vma) \
	do { \
		if (!tlb->fullmm && tlb->end) { \
			tlb_flush(tlb); \
			__tlb_reset_range(tlb); \
		} \
	} while (0)

#ifndef tlb_end_vma
#define tlb_end_vma __tlb_end_vma
#endif

/* Per-arch hook invoked for each cleared PTE; default is a no-op. */
#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif
208
209
210
211
212
213
214
215
/*
 * Record that the PTE at @address was unmapped: grow the pending flush
 * range to cover it, then run the per-arch PTE hook.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address) \
	do { \
		__tlb_adjust_range(tlb, address); \
		__tlb_remove_tlb_entry(tlb, ptep, address); \
	} while (0)

/* Per-arch hook for cleared huge-PMD entries; default is a no-op. */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

/*
 * PMD-level counterpart of tlb_remove_tlb_entry().
 * NOTE(review): __tlb_adjust_range() only grows the range by PAGE_SIZE,
 * not the huge-page size — confirm the arch flush covers the full entry.
 */
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
	do { \
		__tlb_adjust_range(tlb, address); \
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
	} while (0)
235
/*
 * Free a PTE page table: extend the pending flush range to cover @address,
 * then hand the table to the per-arch __pte_free_tlb() implementation.
 */
#define pte_free_tlb(tlb, ptep, address) \
	do { \
		__tlb_adjust_range(tlb, address); \
		__pte_free_tlb(tlb, ptep, address); \
	} while (0)

/*
 * PUD-level table free; absent on architectures using the 4-level
 * folding hack, which provide their own arrangement.
 * NOTE(review): range only grows by PAGE_SIZE here too — confirm the
 * arch flush covers the region the table mapped.
 */
#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address) \
	do { \
		__tlb_adjust_range(tlb, address); \
		__pud_free_tlb(tlb, pudp, address); \
	} while (0)
#endif

/* PMD-level table free; same pattern as pte_free_tlb(). */
#define pmd_free_tlb(tlb, pmdp, address) \
	do { \
		__tlb_adjust_range(tlb, address); \
		__pmd_free_tlb(tlb, pmdp, address); \
	} while (0)

/* Per-arch hook after task migration; the generic version is a no-op. */
#define tlb_migrate_finish(mm) do {} while (0)
257
258#endif
259