#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
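
/*
 * IA-64 mmu_gather support: pages freed while tearing down page tables
 * are batched here and only released once the corresponding TLB entries
 * have been flushed, so no CPU can still be using a stale translation
 * when a page is reused.
 */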
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>

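/*
 * In "fast mode" (tlb->nr == ~0U) freed pages are released immediately.
 * This is only safe when a single CPU is online, since no other CPU can
 * then be holding a stale translation for them.
 */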
#ifdef CONFIG_SMP
# define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
#else
# define tlb_fast_mode(tlb)	(1)
#endif
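
/*
 * Number of page pointers that fit in the on-stack fallback array; the
 * gather starts out using this bundle and switches to a whole page of
 * pointers once __tlb_alloc_page() succeeds.
 */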
#define IA64_GATHER_BUNDLE	8

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;		/* pages gathered so far (~0U => fast mode) */
	unsigned int		max;		/* capacity of the pages[] array */
	unsigned char		fullmm;		/* tearing down the whole mm */
	unsigned char		need_flush;	/* a TLB flush is pending */
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		**pages;
	struct page		*local[IA64_GATHER_BUNDLE];
};

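/*
 * Tracking for translations pinned in the ia64 translation registers
 * (TRs): ia64_itr_entry() inserts a mapping for va/pte with the given
 * log2 page size into an instruction and/or data TR selected by
 * target_mask, ia64_ptr_entry() purges that slot again, and
 * ia64_idtrs[] records the entries currently in use on each CPU.
 */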
struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
};

extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);

extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
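
/*
 * Field accessors for an ia64 region register (RR) value:
 * ve = VHPT walker enable, ps = preferred (log2) page size, rid = region id.
 */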
#define RR_TO_VE(val)	(((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_VE_SHIFT	0
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_PS_SHIFT	2
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val)	(((val) >> 8) & 0xffffff)

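/*
 * Flush the TLB for address range START to END and, if not in fast
 * mode, release the freed pages that were gathered up to this point.
 */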
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	unsigned int nr;

	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/* tearing down the entire address space: flush everything for this mm */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * For very large ranges, or ranges that cross a region
		 * boundary, just flush the whole TLB instead of walking
		 * the range.
		 */
		flush_tlb_all();
	} else {
		/*
		 * flush_tlb_range() wants a vma, so fake up a minimal one
		 * that carries the mm being torn down.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virtually mapped linear page table area for the range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}

	/* lastly, release the freed pages */
	nr = tlb->nr;
	if (!tlb_fast_mode(tlb)) {
		unsigned long i;
		tlb->nr = 0;
		tlb->start_addr = ~0UL;
		for (i = 0; i < nr; ++i)
			free_page_and_swap_cache(tlb->pages[i]);
	}
}

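/*
 * Try to replace the on-stack bundle with a whole page of page
 * pointers; if the allocation fails we simply keep batching into
 * tlb->local.
 */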
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(void *);
	}
}
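
/*
 * Start a TLB shootdown for mm; full_mm_flush is non-zero when the
 * entire address space is being unmapped.
 */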
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
{
	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	/* use fast mode only if a single CPU is online */
	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
	tlb->fullmm = full_mm_flush;
	tlb->start_addr = ~0UL;
}
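
/*
 * Finish the shootdown: perform the final flush, trim the page-table
 * caches and release the page-pointer page if one was allocated.
 */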
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on
	 * tlb->start_addr and tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}
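
/*
 * Logically frees PAGE.  With more than one CPU online the actual
 * freeing must be delayed until after the TLB flush, so the page is
 * queued in tlb->pages instead.  Returns the number of free slots left
 * in the batch; a return value of 0 tells the caller to flush before
 * queuing any more pages.
 */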
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return 1;
	}

	if (!tlb->nr && tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);

	tlb->pages[tlb->nr++] = page;
	VM_BUG_ON(tlb->nr > tlb->max);

	return tlb->max - tlb->nr;
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}
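
/*
 * Record that the PTE mapped at ADDRESS is being removed so that the
 * final range flush covers it: the first call establishes start_addr
 * and each call extends end_addr past the page.
 */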
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}

#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

/* nothing to do per-VMA on ia64; the range is tracked per-PTE above */
#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define pte_free_tlb(tlb, ptep, address)	\
do {						\
	tlb->need_flush = 1;			\
	__pte_free_tlb(tlb, ptep, address);	\
} while (0)

#define pmd_free_tlb(tlb, ptep, address)	\
do {						\
	tlb->need_flush = 1;			\
	__pmd_free_tlb(tlb, ptep, address);	\
} while (0)

#define pud_free_tlb(tlb, pudp, address)	\
do {						\
	tlb->need_flush = 1;			\
	__pud_free_tlb(tlb, pudp, address);	\
} while (0)

#endif /* _ASM_IA64_TLB_H */