/*
 * This file contains ioremap and related functions for 64-bit machines.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>
#include <asm/powernv.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif
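
/*
 * Note (added): with the hash MMU each user context is given VSIDs
 * covering 1UL << (ESID_BITS + SID_SHIFT) bytes of address space, so
 * TASK_SIZE_USER64 must fit inside that range; the check above turns a
 * misconfiguration into a build failure instead of a runtime fault.
 */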

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;

/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pmd_cache_index;
EXPORT_SYMBOL(__pmd_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __kernel_virt_size;
EXPORT_SYMBOL(__kernel_virt_size);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif
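
/*
 * Note (added): the geometry variables above are assigned during early
 * boot by the hash or radix MMU setup code, which is what lets a single
 * kernel image run with either page table layout.
 */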

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & H_PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
			return NULL;

	return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
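
/*
 * Sketch (added, hypothetical values): the pair above is for callers
 * that manage the virtual range themselves, e.g. bolting a window at a
 * fixed EA and later unmapping half of it:
 *
 *	void __iomem *va = __ioremap_at(pa, fixed_ea, 0x10000,
 *				pgprot_val(pgprot_noncached(__pgprot(0))));
 *	if (va)
 *		__iounmap_at(fixed_ea + 0x8000, 0x8000);
 *
 * pa, fixed_ea and the sizes are illustrative only; all of them must be
 * page aligned or the WARN_ONs above will fire.
 */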

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the vmalloc system is
	 * running, we use it. Before that, we map using addresses going
	 * up from ioremap_bot; vmalloc will then use the addresses from
	 * ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}
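
/*
 * Worked example (added, hypothetical numbers): with 64K pages, a call
 * for addr = 0x3fe00004 and size = 8 yields paligned = 0x3fe00000 and a
 * mapped size of 0x10000; the returned cookie is the new mapping's
 * virtual address plus the page offset 0x4.
 */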

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
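
/*
 * Typical driver usage (added sketch; the device address, size and
 * register offset are illustrative):
 *
 *	void __iomem *regs = ioremap(dev_regs_phys, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);
 *	iounmap(regs);
 *
 * ioremap_wc() is the same but maps write-combining, e.g. for
 * framebuffers.
 */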

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;

	/*
	 * Force kernel mapping.
	 */
#if defined(CONFIG_PPC_BOOK3S_64)
	flags |= _PAGE_PRIVILEGED;
#else
	flags &= ~_PAGE_USER;
#endif

#ifdef _PAGE_BAP_SR
	/*
	 * _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE
	 * format, which means that we just cleared supervisor access.
	 * Restore it here.
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

/*
 * Unmap an IO region and remove it from the vmalloc'd list.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For hugepages we have the pfn in the pmd itself; for a regular PTE
 * page the pmd holds a PTE_FRAG_SIZE aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd) || pmd_huge(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}
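
/*
 * Note (added): the three helpers above return the struct page backing
 * an entry in either case: a huge-page leaf goes through pte_page() on
 * the encoded PTE, while a pointer to a lower-level table goes through
 * virt_to_page() on its virtual address.
 */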

#ifdef CONFIG_PPC_64K_PAGES
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark the PTE
		 * page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (!page)
		return NULL;
	if (!kernel && !pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If no other thread seeded mm->context.pte_frag in the
	 * meantime, hand out the first fragment and give the page one
	 * reference per fragment; otherwise the page is returned whole,
	 * with its single allocation reference.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}
#endif /* CONFIG_PPC_64K_PAGES */

void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}
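
/*
 * Fragment arithmetic (added note): a 64K page is carved into
 * PTE_FRAG_NR fragments of PTE_FRAG_SIZE bytes each, so with the usual
 * PTE_FRAG_NR of 16 a single page backs sixteen 4K PTE tables. The
 * page refcount carries one reference per handed-out fragment, and the
 * page is only freed once pte_fragment_free() drops the last one.
 */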

#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift)
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif
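
/*
 * Note (added): the tagging above relies on page table allocations
 * being aligned to more than MAX_PGTABLE_INDEX_SIZE (0xf) bytes, so the
 * low four bits of the pointer are free to smuggle the index size
 * through tlb_remove_table():
 *
 *	pgf   = (unsigned long)table | shift;
 *	table = (void *)(pgf & ~MAX_PGTABLE_INDEX_SIZE);
 *	shift = pgf & MAX_PGTABLE_INDEX_SIZE;
 */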

#ifdef CONFIG_PPC_BOOK3S_64
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Initialize the Partition Table with no entries */
	memset((void *)partition_tb, 0, patb_size);

	/*
	 * Update the partition table control register, which holds the
	 * physical base of the table and its size (64K here).
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}
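
/*
 * PTCR encoding (added note): the low bits hold the table size as a
 * power of two in 4K units, i.e. PATB_SIZE_SHIFT - 12; with the usual
 * PATB_SIZE_SHIFT of 16 that field is 4, announcing a 64K table.
 */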

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * Flush the partition table cache (and TLB) for this lpid. The
	 * type of flush (hash or radix) depends on what the previous use
	 * of this partition ID was, not on the new use.
	 */
	asm volatile("ptesync" : : : "memory");
	if (old & PATB_HR)
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
	else
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
#endif /* CONFIG_PPC_BOOK3S_64 */