/*
 * This file contains the routines for TLB flushing on 32-bit
 * hash-MMU PowerPC.  On machines where the MMU uses a hash table
 * to store virtual to physical translations, these routines flush
 * the corresponding entries from the hash table as well as from
 * the TLB itself.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

/*
 * Called when unmapping pages to flush entries from the TLB/hash table.
 */
void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
	unsigned long ptephys;

	if (Hash != 0) {
		ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(mm->context.id, addr, ptephys, 1);
	}
}
EXPORT_SYMBOL(flush_hash_entry);

/*
 * Flush a single page from the TLB.  On CPUs that use the hash
 * table nothing needs to be done here, since the entry is removed
 * when the corresponding hash PTE is invalidated.
 */
void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
{
	if (Hash != 0)
		return;
	_tlbie(addr);
}

/*
 * Called at the end of a mmu_gather operation to make sure the
 * TLB flush is completely done.
 */
void tlb_flush(struct mmu_gather *tlb)
{
	if (Hash == 0) {
		/*
		 * A CPU without a hash table (e.g. the 603) keeps all
		 * of its translations in the TLB, so flush the whole
		 * TLB here.
		 */
		_tlbia();
	}

	/* Push out the batch of freed page tables. */
	pte_free_finish();
}

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 * Since the hardware hash table functions as an extension of the
 * TLB as far as the Linux page tables are concerned, flush it too.
 */

/*
 * On 750-based SMP systems the other CPUs must be told to invalidate
 * their TLBs with an IPI, hence FINISH_FLUSH; everywhere else it is
 * a no-op.
 */
#ifdef CONFIG_SMP_750
#define FINISH_FLUSH	smp_send_tlb_invalidate(0)
#else
#define FINISH_FLUSH	do { } while (0)
#endif

/*
 * Flush the TLB (and the hash table) entries for a range of
 * addresses in the given address space.
 */
static void flush_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	pmd_t *pmd;
	unsigned long pmd_end;
	int count;
	unsigned int ctx = mm->context.id;

	if (Hash == 0) {
		_tlbia();
		return;
	}
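	/*
	 * Illustrative rounding (assuming 4 KB pages): start is rounded
	 * down to its page boundary and end up to the last byte of its
	 * page, so e.g. [0x10000234, 0x10002100) becomes
	 * [0x10000000, 0x10002fff], i.e. three whole pages.
	 */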
	start &= PAGE_MASK;
	if (start >= end)
		return;
	end = (end - 1) | ~PAGE_MASK;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start);
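	/*
	 * Walk the range one page-table page (pmd entry) at a time;
	 * each pmd covers PGDIR_SIZE of address space (4 MB with the
	 * usual 32-bit layout, given here only as an illustration), so
	 * a range that crosses such a boundary is flushed with more
	 * than one flush_hash_pages() call.
	 */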
	for (;;) {
		pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
		if (pmd_end > end)
			pmd_end = end;
		if (!pmd_none(*pmd)) {
			count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
			flush_hash_pages(ctx, start, pmd_val(*pmd), count);
		}
		if (pmd_end == end)
			break;
		start = pmd_end + 1;
		++pmd;
	}
}

/*
 * Flush kernel TLB entries in the given range.
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_range(&init_mm, start, end);
	FINISH_FLUSH;
}
EXPORT_SYMBOL(flush_tlb_kernel_range);

/*
 * Flush all the (user) entries for the address space described by mm.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *mp;

	if (Hash == 0) {
		_tlbia();
		return;
	}

	/*
	 * Rather than flushing the whole context, walk the mm's list
	 * of vmas and flush each mapped range individually.
	 */
	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
		flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
	FINISH_FLUSH;
}
EXPORT_SYMBOL(flush_tlb_mm);

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct mm_struct *mm;
	pmd_t *pmd;

	if (Hash == 0) {
		_tlbie(vmaddr);
		return;
	}
	/* Kernel addresses are looked up in init_mm's page tables. */
	mm = (vmaddr < TASK_SIZE) ? vma->vm_mm : &init_mm;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
	if (!pmd_none(*pmd))
		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
	FINISH_FLUSH;
}
EXPORT_SYMBOL(flush_tlb_page);

/*
 * Flush the TLB (and hash table) entries for a range of user
 * addresses in the given vma's address space.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_range(vma->vm_mm, start, end);
	FINISH_FLUSH;
}
EXPORT_SYMBOL(flush_tlb_range);