1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#ifndef _ASM_GENERIC__TLB_H
16#define _ASM_GENERIC__TLB_H
17
18#include <linux/swap.h>
19#include <asm/pgalloc.h>
20#include <asm/tlbflush.h>
21
22#ifdef CONFIG_HAVE_RCU_TABLE_FREE
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51struct mmu_table_batch {
52 struct rcu_head rcu;
53 unsigned int nr;
54 void *tables[0];
55};
56
/*
 * How many page-table pointers fit in the single page that holds a
 * struct mmu_table_batch (header included).
 */
#define MAX_TABLE_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

/* Flush the pending table batch (implemented in mm/memory.c). */
extern void tlb_table_flush(struct mmu_gather *tlb);
/* Queue a page-table page for RCU-deferred freeing. */
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
64
65
66
67
68
69#define MMU_GATHER_BUNDLE 8
70
/*
 * One page-sized batch of struct page pointers gathered for freeing
 * after the TLB flush.  Batches form a singly linked list headed by
 * mmu_gather::local.
 *
 * Use a C99 flexible array member rather than the GNU zero-length
 * array extension ([0]): it is standard C and does not trip
 * FORTIFY_SOURCE/UBSAN array-bounds checking.
 */
struct mmu_gather_batch {
	struct mmu_gather_batch	*next;	/* next batch in the chain */
	unsigned int		nr;	/* entries used in pages[] */
	unsigned int		max;	/* capacity of pages[] */
	struct page		*pages[]; /* gathered pages to free */
};
77
/*
 * How many struct page pointers fit in the single page holding a
 * struct mmu_gather_batch (header included).
 */
#define MAX_GATHER_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Cap on the number of batch pages a single mmu_gather may chain,
 * i.e. roughly 10000 gathered pages in total.  This bounds the time
 * spent freeing pages in one go, keeping the operation preemptible
 * enough to avoid soft lockups on huge unmaps.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
88
89
90
91
/*
 * struct mmu_gather collects pages and the virtual range being torn
 * down so the TLB can be flushed once (per batch) and the pages freed
 * only after the flush, preventing use-after-free via stale TLB
 * entries.
 */
struct mmu_gather {
	struct mm_struct *mm;		/* address space being unmapped */
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch *batch;	/* page-table pages pending RCU free */
#endif
	unsigned long start;		/* lowest VA gathered so far */
	unsigned long end;		/* highest VA gathered (exclusive) */
	/* we are in the middle of an operation to clear
	 * a full mm and can make some optimizations */
	unsigned int fullmm : 1,
	/* we have performed an operation which
	 * requires a complete flush of the tlb */
	need_flush_all : 1;

	struct mmu_gather_batch *active; /* batch currently being filled */
	struct mmu_gather_batch local;	/* embedded first batch (header) */
	struct page *__pages[MMU_GATHER_BUNDLE]; /* storage for 'local' */
	unsigned int batch_count;	/* batches allocated so far (capped) */
};
111
/* Advertise that this architecture uses the generic mmu_gather code. */
#define HAVE_GENERIC_MMU_GATHER

/* Start a gather operation over [start, end) of @mm (see mm/memory.c). */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
/* Flush the TLB for the gathered range and release the queued pages. */
void tlb_flush_mmu(struct mmu_gather *tlb);
/* Finish the gather: final flush, free remaining pages and batches. */
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
							unsigned long end);
/* Queue @page; returns 0 when the current batch is full. */
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
119
120
121
122
123
/*
 * Queue @page for freeing after the TLB flush.  When
 * __tlb_remove_page() reports no more batch space (returns 0), fall
 * back to flushing immediately, which also drains the queued pages.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page))
		return;

	tlb_flush_mmu(tlb);
}
129
130static inline void __tlb_adjust_range(struct mmu_gather *tlb,
131 unsigned long address)
132{
133 tlb->start = min(tlb->start, address);
134 tlb->end = max(tlb->end, address + PAGE_SIZE);
135}
136
137static inline void __tlb_reset_range(struct mmu_gather *tlb)
138{
139 if (tlb->fullmm) {
140 tlb->start = tlb->end = ~0;
141 } else {
142 tlb->start = TASK_SIZE;
143 tlb->end = 0;
144 }
145}
146
147
148
149
150
151
/* Default no-op; architectures may override to hook per-VMA teardown. */
#ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0)
#endif
155
/*
 * Generic end-of-VMA hook: flush whatever range has been gathered for
 * this VMA (skipped for a full-mm teardown, which flushes once at the
 * end, and when nothing was gathered), then reset the range.
 *
 * Parenthesize the macro argument so an expression passed as @tlb
 * expands correctly (standard macro hygiene; no change for existing
 * callers, which pass a plain identifier).
 */
#define __tlb_end_vma(tlb, vma)					\
	do {							\
		if (!(tlb)->fullmm && (tlb)->end) {		\
			tlb_flush(tlb);				\
			__tlb_reset_range(tlb);			\
		}						\
	} while (0)
163
/* Architectures may provide their own tlb_end_vma(); default to generic. */
#ifndef tlb_end_vma
#define tlb_end_vma	__tlb_end_vma
#endif

/* Default no-op; architectures override to invalidate a single entry. */
#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif
171
172
173
174
175
176
177
178
/*
 * Record that the PTE at @address was torn down: grow the gathered
 * flush range to cover it, then let the architecture hook act on the
 * entry itself.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
184
185
186
187
188
/* Default no-op; architectures override for huge-PMD invalidation. */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

/*
 * PMD-level analogue of tlb_remove_tlb_entry(): extend the gathered
 * range, then invoke the architecture hook for the huge entry.
 */
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
	} while (0)
198
/*
 * Free a page-table page at each level: extend the gathered flush
 * range to the address it mapped, then hand the table page to the
 * architecture's __p??_free_tlb() for (possibly deferred) freeing.
 */
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)

/* Architectures using the 4-level folding hack have no real PUD level. */
#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)

/* No-op in the generic code; some architectures hook task migration. */
#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */
222