1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30#include <linux/kernel.h>
31#include <linux/mm.h>
32#include <linux/init.h>
33#include <linux/highmem.h>
34#include <linux/pagemap.h>
35#include <linux/preempt.h>
36#include <linux/spinlock.h>
37#include <linux/lmb.h>
38
39#include <asm/tlbflush.h>
40#include <asm/tlb.h>
41#include <asm/code-patching.h>
42
43#include "mmu_decl.h"
44
#ifdef CONFIG_PPC_BOOK3E
/*
 * Table of supported "page sizes" for Book3E MMUs: maps each generic
 * MMU_PAGE_* index to its shift (log2 of the size in bytes) and to the
 * hardware TLB size encoding (BOOK3E_PAGESZ_*) programmed into MAS
 * registers / invalidate operations.
 */
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
		.enc	= BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift	= 16,
		.enc	= BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift	= 20,
		.enc	= BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};

/* Return the hardware TLB size encoding for a generic page size index */
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* The tsize argument is unused on non-Book3E nohash parts */
	return 0;
}
#endif
87
88
89
90
91
#ifdef CONFIG_PPC64

/* Page size index used for the linear mapping (set in __early_init_mmu) */
int mmu_linear_psize;
/* Page size index used when flushing page-table (PTE page) translations */
int mmu_pte_psize;
/* Page size index used to back vmemmap (set to 16M in __early_init_mmu) */
int mmu_vmemmap_psize;
/* Non-zero once the Book3E HW page table walker has been enabled */
int book3e_htw_enabled;
/* End of RAM covered by the linear mapping (from lmb_end_of_DRAM()) */
unsigned long linear_map_top;

#endif
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117void local_flush_tlb_mm(struct mm_struct *mm)
118{
119 unsigned int pid;
120
121 preempt_disable();
122 pid = mm->context.id;
123 if (pid != MMU_NO_CONTEXT)
124 _tlbil_pid(pid);
125 preempt_enable();
126}
127EXPORT_SYMBOL(local_flush_tlb_mm);
128
129void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
130 int tsize, int ind)
131{
132 unsigned int pid;
133
134 preempt_disable();
135 pid = mm ? mm->context.id : 0;
136 if (pid != MMU_NO_CONTEXT)
137 _tlbil_va(vmaddr, pid, tsize, ind);
138 preempt_enable();
139}
140
141void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
142{
143 __local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
144 mmu_get_tsize(mmu_virtual_psize), 0);
145}
146EXPORT_SYMBOL(local_flush_tlb_page);
147
148
149
150
#ifdef CONFIG_SMP

/*
 * Serializes broadcast tlbivax invalidates on parts that advertise
 * MMU_FTR_LOCK_BCAST_INVAL (see __flush_tlb_page()).
 */
static DEFINE_SPINLOCK(tlbivax_lock);

/*
 * Returns non-zero when every CPU @mm has been active on is a thread
 * sibling of the current CPU, i.e. a core-local flush is sufficient.
 */
static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_thread_cpumask(smp_processor_id()));
}

/* Argument bundle handed to the TLB flush IPI handlers below */
struct tlb_flush_param {
	unsigned long addr;	/* virtual address to invalidate */
	unsigned int pid;	/* context id (PID) to invalidate */
	unsigned int tsize;	/* hardware page size encoding */
	unsigned int ind;	/* non-zero: indirect (page table) entry */
};
167
168static void do_flush_tlb_mm_ipi(void *param)
169{
170 struct tlb_flush_param *p = param;
171
172 _tlbil_pid(p ? p->pid : 0);
173}
174
175static void do_flush_tlb_page_ipi(void *param)
176{
177 struct tlb_flush_param *p = param;
178
179 _tlbil_va(p->addr, p->pid, p->tsize, p->ind);
180}
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199void flush_tlb_mm(struct mm_struct *mm)
200{
201 unsigned int pid;
202
203 preempt_disable();
204 pid = mm->context.id;
205 if (unlikely(pid == MMU_NO_CONTEXT))
206 goto no_context;
207 if (!mm_is_core_local(mm)) {
208 struct tlb_flush_param p = { .pid = pid };
209
210 smp_call_function_many(mm_cpumask(mm),
211 do_flush_tlb_mm_ipi, &p, 1);
212 }
213 _tlbil_pid(pid);
214 no_context:
215 preempt_enable();
216}
217EXPORT_SYMBOL(flush_tlb_mm);
218
/*
 * Flush one translation of size class @tsize at @vmaddr for @mm on all
 * CPUs that may cache it. Uses a broadcast tlbivax when the hardware
 * supports it, otherwise IPIs; purely local when the mm is confined to
 * this core. @ind selects indirect (page table) entries when non-zero.
 *
 * NOTE(review): a NULL @mm yields pid 0 and would then dereference
 * mm_cpumask(NULL) below unless MMU_NO_CONTEXT == 0 — looks like SMP
 * callers never pass NULL here; confirm.
 */
void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* Broadcast invalidate reaches the local CPU too, so the
		 * trailing local _tlbil_va() is skipped on that path. */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			/* Some parts require broadcast invalidates to be
			 * serialized (MMU_FTR_LOCK_BCAST_INVAL). */
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* IPI the other CPUs that have seen this mm;
			 * wait == 1 since p lives on our stack */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	/* Local invalidate (the only step for core-local mms) */
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}
256
257void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
258{
259 __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
260 mmu_get_tsize(mmu_virtual_psize), 0);
261}
262EXPORT_SYMBOL(flush_tlb_page);
263
264#endif
265
266
267
268
/*
 * Flush kernel-space translations. The start/end range is not used:
 * the whole kernel context (PID 0) is invalidated, on all CPUs when
 * SMP.
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	/* NULL param makes the IPI handler flush PID 0 */
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
281
282
283
284
285
286
287
288void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
289 unsigned long end)
290
291{
292 flush_tlb_mm(vma->vm_mm);
293}
294EXPORT_SYMBOL(flush_tlb_range);
295
/*
 * mmu_gather teardown hook: flush the mm being torn down, then push
 * out the batch of freed page table pages. Order matters — the TLB
 * flush must complete before the page tables are actually freed.
 */
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);

	/* Push out batch of freed page tables */
	pte_free_finish();
}
303
304
305
306
307
308
309#ifdef CONFIG_PPC64
310
311
312
313
314
/*
 * Invalidate cached translations of a page table page at @address
 * after it has been freed. Two cases:
 *  - HW tablewalk enabled: flush the indirect (ind == 1) TLB entries
 *    covering the PMD-sized region the freed table mapped, one
 *    mmu_pte_psize-sized step at a time.
 *  - No tablewalk: flush the direct entry mapping the PTE page inside
 *    the virtually linear page table region instead.
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_enabled) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* NOTE(review): end is based on @address, not on the
		 * rounded-down start, so the loop may cover parts of two
		 * PMDs when @address isn't PMD aligned — confirm intended. */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		/* Rebuild the virtual address of the PTE entries for
		 * @address within the linearized page table region:
		 * keep the top region bits, OR in the page-table region
		 * selector, and scale by PTE size / page size.
		 * NOTE(review): the 0x1000... region id must match the
		 * TLB miss handlers' layout — confirm against those. */
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

#ifdef CONFIG_PPC_64K_PAGES
		/* 8-byte PTEs, 64K pages: shift by PAGE_SHIFT - 4 */
		vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
#else
		/* 8-byte PTEs, 4K pages: shift by PAGE_SHIFT - 3 */
		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
#endif
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}
346
347
348
349
350static void __early_init_mmu(int boot_cpu)
351{
352 extern unsigned int interrupt_base_book3e;
353 extern unsigned int exc_data_tlb_miss_htw_book3e;
354 extern unsigned int exc_instruction_tlb_miss_htw_book3e;
355
356 unsigned int *ibase = &interrupt_base_book3e;
357 unsigned int mas4;
358
359
360
361
362
363
364
365 mmu_linear_psize = MMU_PAGE_1G;
366
367
368
369
370
371 mmu_vmemmap_psize = MMU_PAGE_16M;
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388 if (boot_cpu) {
389 unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
390
391
392 if ((tlb0cfg & TLBnCFG_IND) &&
393 (tlb0cfg & TLBnCFG_PT)) {
394 patch_branch(ibase + (0x1c0 / 4),
395 (unsigned long)&exc_data_tlb_miss_htw_book3e, 0);
396 patch_branch(ibase + (0x1e0 / 4),
397 (unsigned long)&exc_instruction_tlb_miss_htw_book3e, 0);
398 book3e_htw_enabled = 1;
399 }
400 pr_info("MMU: Book3E Page Tables %s\n",
401 book3e_htw_enabled ? "Enabled" : "Disabled");
402 }
403
404
405
406 mas4 = 0x4 << MAS4_WIMGED_SHIFT;
407 if (book3e_htw_enabled) {
408 mas4 |= mas4 | MAS4_INDD;
409#ifdef CONFIG_PPC_64K_PAGES
410 mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
411 mmu_pte_psize = MMU_PAGE_256M;
412#else
413 mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
414 mmu_pte_psize = MMU_PAGE_1M;
415#endif
416 } else {
417#ifdef CONFIG_PPC_64K_PAGES
418 mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
419#else
420 mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
421#endif
422 mmu_pte_psize = mmu_virtual_psize;
423 }
424 mtspr(SPRN_MAS4, mas4);
425
426
427
428
429 linear_map_top = lmb_end_of_DRAM();
430
431
432
433
434 mb();
435}
436
/* Boot-CPU entry point for early MMU setup */
void __init early_init_mmu(void)
{
	__early_init_mmu(1);
}
441
/* Secondary-CPU entry point: same setup, minus boot-CPU-only patching */
void __cpuinit early_init_mmu_secondary(void)
{
	__early_init_mmu(0);
}
446
447#endif
448