// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>
#include <asm/pte-walk.h>

#include <trace/events/thp.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
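
/*
 * A note on the batching scheme (summary, based on the code below and
 * on the hash flavour of asm/book3s/64/tlbflush-hash.h): each CPU owns
 * one ppc64_tlb_batch holding up to PPC64_TLB_BATCH_NR pending hash
 * page table invalidations. arch_enter_lazy_mmu_mode() marks the batch
 * active, hpte_need_flush() queues entries into it, and
 * arch_leave_lazy_mmu_mode() drains it via __flush_tlb_pending().
 */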

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	unsigned long vpn;
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i, offset;

	i = batch->index;

	/*
	 * Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must always be aligned to 64K (and we *will* WARN if that is
	 * not the case) to avoid this nastiness.
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);

		/* Mask the address for the correct page size */
		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
		if (unlikely(psize == MMU_PAGE_16G))
			offset = PTRS_PER_PUD;
		else
			offset = PTRS_PER_PMD;
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else {
		psize = pte_pagesize_index(mm, addr, pte);

		/*
		 * Mask the address for the standard page size. If we
		 * have a 64k page kernel, but the hardware does not
		 * support 64k pages, this might be different from the
		 * hardware page size encoded in the slice table.
		 */
		addr &= PAGE_MASK;
		offset = PTRS_PER_PTE;
	}
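
	/*
	 * Note on 'offset' (summary of the __real_pte() machinery in
	 * asm/book3s/64/hash-64k.h): with a 64K-page kernel backed by
	 * 4K hardware pages, the hash slot numbers for the sub-pages
	 * are kept in a second half of the page table page, 'offset'
	 * PTE entries beyond ptep. __real_pte() below reads them from
	 * there when H_PAGE_COMBO is set.
	 */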

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_user_vsid(&mm->context, addr, ssize);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	WARN_ON(vsid == 0);
	vpn = hpt_vpn(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep, offset);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return.
	 */
	if (!batch->active) {
		flush_hash_page(vpn, rpte, psize, ssize, mm_is_thread_local(mm));
		put_cpu_var(ppc64_tlb_batch);
		return;
	}
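
	/*
	 * (batch->active is set by the hash flavour of
	 * arch_enter_lazy_mmu_mode() and cleared, after draining the
	 * batch, by arch_leave_lazy_mmu_mode(); outside such a lazy
	 * MMU section every flush takes the immediate path above.)
	 */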

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits (ptep_clear_flush_young),
	 * batch->mm != mm and we should flush.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch.
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vpn[i] = vpn;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	int i, local;

	i = batch->index;
	local = mm_is_thread_local(batch->mm);
	if (i == 1)
		flush_hash_page(batch->vpn[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}
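
/*
 * Called as the mmu_gather tlb_flush hook (see tlb_flush() in
 * asm/tlb.h, which dispatches here when the hash MMU is active), i.e.
 * at the end of an unmap operation, so nothing stays queued once the
 * gathered pages are freed.
 */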
void hash__tlb_flush(struct mmu_gather *tlb)
{
	struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);

	/*
	 * If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
	 * access a freed page because it has a stale TLB
	 */
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);

	put_cpu_var(ppc64_tlb_batch);
}

/**
 * __flush_hash_table_range - flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end		: ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * WARNING: This function will *not* flush any of the i/d caches since the
 * kernel driver or hypervisor should do that when unmapping, not us.
 */
void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	bool is_thp;
	int hugepage_shift;
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/*
	 * Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_current_mm_pte(mm->pgd, start, &is_thp,
						  &hugepage_shift);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (is_thp)
			trace_hugepage_invalidate(start, pte);
		if (!(pte & H_PAGE_HASHPTE))
			continue;
		if (unlikely(is_thp))
			hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
		else
			hpte_need_flush(mm, start, ptep, pte, hugepage_shift);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
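
/*
 * Flush the hash entries for all normal-page PTEs mapped under one PMD.
 * (For context: this is used by the THP collapse path on hash, e.g.
 * hash__pmdp_collapse_flush(), to invalidate the small-page HPTEs
 * before a huge page mapping replaces them.)
 */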
void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;
	pte_t *start_pte;
	unsigned long flags;

	addr = _ALIGN_DOWN(addr, PMD_SIZE);

	/*
	 * Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	start_pte = pte_offset_map(pmd, addr);
	for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
		unsigned long pteval = pte_val(*pte);
		if (pteval & H_PAGE_HASHPTE)
			hpte_need_flush(mm, addr, pte, pteval, 0);
		addr += PAGE_SIZE;
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}