1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include <linux/kernel.h>
21#include <linux/mm.h>
22#include <linux/percpu.h>
23#include <linux/hardirq.h>
24#include <asm/tlbflush.h>
25#include <asm/tlb.h>
26#include <asm/bug.h>
27#include <asm/pte-walk.h>
28
29
30#include <trace/events/thp.h>
31
32DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
33
34
35
36
37
38
39
/*
 * A linux PTE for @addr in @mm is being changed: the corresponding hash
 * table entry needs invalidating.  Either flush it immediately (when the
 * per-cpu batch is not active) or queue it in the per-cpu ppc64_tlb_batch
 * for a later __flush_tlb_pending().
 *
 * @mm:   address space the PTE belongs to
 * @addr: virtual address covered by the PTE
 * @ptep: pointer to the linux PTE being changed
 * @pte:  the PTE value to invalidate the hash entry for
 * @huge: non-zero when @ptep maps a huge page
 *
 * Holds the per-cpu batch via get_cpu_var() for the whole function, so
 * preemption is disabled while the batch is manipulated.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	unsigned long vpn;
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i, offset;

	i = batch->index;

	/*
	 * Work out the page size and round @addr down to the start of the
	 * page.  For huge pages the offset handed to __real_pte() depends
	 * on the level the PTE sits at: PUD for 16G pages, PMD otherwise.
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
		/* Mask the address down for the detected huge page size */
		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
		if (unlikely(psize == MMU_PAGE_16G))
			offset = PTRS_PER_PUD;
		else
			offset = PTRS_PER_PMD;
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* unreachable; keeps gcc quiet */
#endif
	} else {
		psize = pte_pagesize_index(mm, addr, pte);
		addr &= PAGE_MASK;
		offset = PTRS_PER_PTE;
	}

	/*
	 * Build the virtual page number: kernel addresses use the fixed
	 * kernel VSID/segment size, user addresses look both up from the
	 * mm context.
	 */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_user_vsid(&mm->context, addr, ssize);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	WARN_ON(vsid == 0);
	vpn = hpt_vpn(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep, offset);

	/*
	 * Batching inactive: flush the single hash entry right away and
	 * drop the per-cpu reference.
	 */
	if (!batch->active) {
		flush_hash_page(vpn, rpte, psize, ssize, mm_is_thread_local(mm));
		put_cpu_var(ppc64_tlb_batch);
		return;
	}

	/*
	 * A batch can only contain entries for one mm, one page size and
	 * one segment size; if the new entry does not match a non-empty
	 * batch, flush the pending entries first and start over.
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vpn[i] = vpn;
	batch->index = ++i;
	/* Batch full: flush it now rather than overflow the arrays. */
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}
136
137
138
139
140
141
142
143
144void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
145{
146 int i, local;
147
148 i = batch->index;
149 local = mm_is_thread_local(batch->mm);
150 if (i == 1)
151 flush_hash_page(batch->vpn[0], batch->pte[0],
152 batch->psize, batch->ssize, local);
153 else
154 flush_hash_range(i, local);
155 batch->index = 0;
156}
157
158void hash__tlb_flush(struct mmu_gather *tlb)
159{
160 struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);
161
162
163
164
165
166
167 if (tlbbatch->index)
168 __flush_tlb_pending(tlbbatch);
169
170 put_cpu_var(ppc64_tlb_batch);
171}
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190void __flush_hash_table_range(unsigned long start, unsigned long end)
191{
192 int hugepage_shift;
193 unsigned long flags;
194
195 start = ALIGN_DOWN(start, PAGE_SIZE);
196 end = ALIGN(end, PAGE_SIZE);
197
198
199
200
201
202
203
204
205
206
207 local_irq_save(flags);
208 arch_enter_lazy_mmu_mode();
209 for (; start < end; start += PAGE_SIZE) {
210 pte_t *ptep = find_init_mm_pte(start, &hugepage_shift);
211 unsigned long pte;
212
213 if (ptep == NULL)
214 continue;
215 pte = pte_val(*ptep);
216 if (!(pte & H_PAGE_HASHPTE))
217 continue;
218 hpte_need_flush(&init_mm, start, ptep, pte, hugepage_shift);
219 }
220 arch_leave_lazy_mmu_mode();
221 local_irq_restore(flags);
222}
223
224void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
225{
226 pte_t *pte;
227 pte_t *start_pte;
228 unsigned long flags;
229
230 addr = ALIGN_DOWN(addr, PMD_SIZE);
231
232
233
234
235
236
237
238
239 local_irq_save(flags);
240 arch_enter_lazy_mmu_mode();
241 start_pte = pte_offset_map(pmd, addr);
242 for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
243 unsigned long pteval = pte_val(*pte);
244 if (pteval & H_PAGE_HASHPTE)
245 hpte_need_flush(mm, addr, pte, pteval, 0);
246 addr += PAGE_SIZE;
247 }
248 arch_leave_lazy_mmu_mode();
249 local_irq_restore(flags);
250}
251