/*
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/irq.h>

#include "init_64.h"
unsigned long kern_linear_pte_xor[4] __read_mostly;

/* A bitmap, two bits for every 256MB of physical memory.  The two-bit
 * value for a region is an index into kern_linear_pte_xor[] and thus
 * selects which page size backs the kernel linear mapping of that
 * region:
 *
 *	0	==> 4MB
 *	1	==> 256MB
 *	2	==> 2GB
 *	3	==> 16GB
 *
 * Each slot of kern_linear_pte_xor[] holds the value to xor into a
 * linear-area virtual address to form the corresponding TTE.
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];

#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for the 4MB, 256MB, 2GB and 16GB linear
 * mappings.
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

static unsigned long cpu_pgsz_mask;

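/* Physical memory banks as reported by the firmware.  The "reg" and
 * "available" properties of the OBP /memory node are read into
 * fixed-size arrays of this many entries.
 */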
#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS];
static int pavail_ents;

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	phandle node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
					sizeof(unsigned long)];
EXPORT_SYMBOL(sparc64_valid_addr_bitmap);

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk image location and size, filled in by the boot loader.  */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

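/* Flush the D-cache for @page.  Never called on sun4v (note the
 * BUG_ON below); flush_dcache_page() returns early for hypervisor
 * platforms.
 */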
inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

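/* Atomically mark @page as dcache-dirty in page->flags and record
 * @this_cpu as the cpu whose D-cache holds the stale lines.  A casx
 * loop is used because generic code updates page->flags concurrently.
 */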
static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;

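/* D-cache maintenance for a pfn that is about to be mapped: if some
 * cpu dirtied the page in its D-cache, flush it on that cpu before
 * the new translation can be used.
 */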
static void flush_dcache(unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	if (page) {
		unsigned long pg_flags;

		pg_flags = page->flags;
		if (pg_flags & (1UL << PG_dcache_dirty)) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}
}

static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
				    unsigned long tsb_hash_shift, unsigned long address,
				    unsigned long tte)
{
	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
	unsigned long tag;

	if (unlikely(!tsb))
		return;

	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, tte);
}

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline bool is_hugetlb_pte(pte_t pte)
{
	if ((tlb_type == hypervisor &&
	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
	    (tlb_type != hypervisor &&
	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
		return true;
	return false;
}
#endif

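/* Called by the generic mm code after a PTE has been installed.
 * Preload the new translation into this mm's TSB so the next access
 * does not have to take the slow TSB-miss path.
 */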
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm;
	unsigned long flags;
	pte_t pte = *ptep;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			flush_dcache(pfn);
	}

	mm = vma->vm_mm;

	spin_lock_irqsave(&mm->context.lock, flags);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
					address, pte_val(pte));
	else
#endif
		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
					address, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the TLB (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
EXPORT_SYMBOL(flush_icache_range);

void mmu_info(struct seq_file *m)
{
	static const char *pgsz_strings[] = {
		"8K", "64K", "512K", "4MB", "32MB",
		"256MB", "2GB", "16GB",
	};
	int i, printed;

	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

	seq_printf(m, "MMU PGSZs\t: ");
	printed = 0;
	for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
		if (cpu_pgsz_mask & (1UL << i)) {
			seq_printf(m, "%s%s",
				   printed ? "," : "", pgsz_strings[i]);
			printed++;
		}
	}
	seq_putc(m, '\n');

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif
#endif
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes.  Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

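/* Sort the firmware translations by virtual address so that the OBP
 * range forms one contiguous run which the scan below can locate.
 */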
static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits.  */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}

	/* Force execute bit on.  */
	for (i = 0; i < prom_trans_ents; i++)
		prom_trans[i].data |= (tlb_type == hypervisor ?
				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}

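/* Ask the sun4v hypervisor to install @pte for @vaddr as a permanent
 * mapping in the given @mmu (HV_MMU_DMMU and/or HV_MMU_IMMU), halting
 * if the call fails.
 */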
static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}


static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs(get_fs());

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
EXPORT_SYMBOL(__flush_dcache_range);

DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	int new_version;

	spin_lock(&ctx_alloc_lock);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock(&ctx_alloc_lock);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * versions set sparc_ramdisk_image64 instead.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the real physical
		 * base of the kernel image.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		memblock_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

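/* A physical address belongs to NUMA node i exactly when
 * (addr & node_masks[i].mask) == node_masks[i].val.
 */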
struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}

static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	return -1;
}

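/* Return the end of the longest prefix of [start, end) that lies on
 * a single NUMA node, and store that node's id in *nid.
 */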
static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;
	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
#endif

/* Allocate and initialize the pglist_data for NUMA node @nid, and
 * record the node's pfn span.
 */
static void __init allocate_node_data(int nid)
{
	struct pglist_data *p;
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long paddr;

	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->node_id = nid;
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;
}

static void init_node_masks_nonnuma(void)
{
	int i;

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	cpumask_setall(&numa_cpumask_lookup_table[0]);
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

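/* Return 0 if a node reachable from @pio carries a "cfg-handle"
 * property equal to @cfg_handle, -ENODEV otherwise.  Together with
 * scan_arcs_for_cfg_handle() below, this maps a PCI controller to
 * the NUMA group it hangs off of.
 */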
static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

static void __init add_node_ranges(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long size = reg->size;
		unsigned long start, end;

		start = reg->base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = memblock_nid_range(start, end, &nid);

			numadbg("Setting memblock NUMA node nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			memblock_set_node(start, this_end - start,
					  &memblock.memory, nid);
			start = this_end;
		}
	}
}

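/* Copy the "memory-latency-group" nodes of the machine description
 * into the mlgroups[] array.
 */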
static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
			"match[%llx] mask[%llx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);

		/* The address-congruence-offset property is optional.
		 * Explicitly zero it when missing.
		 */
		if (val)
			m->offset = *val;
		else
			m->offset = 0UL;

		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}

static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpumask_clear(mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < nr_cpu_ids)
			cpumask_set_cpu(*id, mask);
	}
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}

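/* Bind NUMA node @index to the lowest-latency memory-latency-group
 * reachable from mdesc group node @grp, recording its address mask
 * and match value in node_masks[].
 */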
static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu(cpu, &mask)
		numa_cpu_lookup_table[cpu] = index;
	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu(cpu, &mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}

static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}

static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}

static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}
	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif

static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
	allocate_node_data(0);
	node_set_online(0);
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;

	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* Dump memblock with node info. */
	memblock_dump_all();

	/* XXX cpu notifier XXX */

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */

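/* Store the two-bit page-size selector @val for 256MB region number
 * @index (physical address >> 28) of the kernel linear mapping.
 * Each long of kpte_linear_bitmap holds BITS_PER_LONG / 2 such
 * selectors.
 */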
static void __init kpte_set_val(unsigned long index, unsigned long val)
{
	unsigned long *ptr = kpte_linear_bitmap;

	val <<= ((index % (BITS_PER_LONG / 2)) * 2);
	ptr += (index / (BITS_PER_LONG / 2));

	*ptr |= val;
}

static const unsigned long kpte_shift_min = 28;	/* 256MB */
static const unsigned long kpte_shift_max = 34;	/* 16GB */
static const unsigned long kpte_shift_incr = 3;

static unsigned long kpte_mark_using_shift(unsigned long start, unsigned long end,
					   unsigned long shift)
{
	unsigned long size = (1UL << shift);
	unsigned long mask = (size - 1UL);
	unsigned long remains = end - start;
	unsigned long val;

	if (remains < size || (start & mask))
		return start;

	/* VAL maps:
	 *
	 *	shift 28 --> kern_linear_pte_xor index 1 (256MB)
	 *	shift 31 --> kern_linear_pte_xor index 2 (2GB)
	 *	shift 34 --> kern_linear_pte_xor index 3 (16GB)
	 */
	val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1;

	remains &= ~mask;
	if (shift != kpte_shift_max)
		remains = size;

	while (remains) {
		unsigned long index = start >> kpte_shift_min;

		kpte_set_val(index, val);

		start += 1UL << kpte_shift_min;
		remains -= 1UL << kpte_shift_min;
	}

	return start;
}

static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	unsigned long smallest_size, smallest_mask;
	unsigned long s;

	smallest_size = (1UL << kpte_shift_min);
	smallest_mask = (smallest_size - 1UL);

	while (start < end) {
		unsigned long orig_start = start;

		for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) {
			start = kpte_mark_using_shift(start, end, s);

			if (start != orig_start)
				break;
		}

		if (start == orig_start)
			start = (start + smallest_size) & ~smallest_mask;
	}
}

static void __init init_kpte_bitmap(void)
{
	unsigned long i;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);
	}
}

static void __init kernel_physical_mapping_init(void)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long i, mem_alloced = 0UL;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
#endif
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

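/* Return the base address of the first physical memory bank that is
 * at least @size bytes long, or ~0UL if there is none.  As the name
 * suggests, the caller uses this to size a displacement-flush region
 * for the E-cache.
 */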
unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}

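/* Rewrite the TSB-access instruction sequences so that they use
 * physical rather than virtual addresses.  Only performed on cheetah+
 * and sun4v (see paging_init), where the kernel TSBs are referenced
 * by physical address (compare tsb_insert above).
 */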
static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}


#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

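/* Patch a sethi/or instruction pair so that it forms the physical
 * address of a kernel TSB; @pa is pre-shifted by KTSB_PHYS_SHIFT,
 * the upper 22 bits go into the sethi and the low 10 bits into the
 * or.
 */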
static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
{
	pa >>= KTSB_PHYS_SHIFT;

	while (start < end) {
		unsigned int *ia = (unsigned int *)(unsigned long)*start;

		ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
		__asm__ __volatile__("flush	%0" : : "r" (ia));

		ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
		__asm__ __volatile__("flush	%0" : : "r" (ia + 1));

		start++;
	}
}

static void ktsb_phys_patch(void)
{
	extern unsigned int __swapper_tsb_phys_patch;
	extern unsigned int __swapper_tsb_phys_patch_end;
	unsigned long ktsb_pa;

	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
	patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
			    &__swapper_tsb_phys_patch_end, ktsb_pa);
#ifndef CONFIG_DEBUG_PAGEALLOC
	{
	extern unsigned int __swapper_4m_tsb_phys_patch;
	extern unsigned int __swapper_4m_tsb_phys_patch_end;
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
	patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
			    &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
	}
#endif
}

static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB is for kernel linear mappings.  */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB is for 4MB/256MB/2GB/16GB mappings.  */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
				    HV_PGSZ_MASK_256MB |
				    HV_PGSZ_MASK_2GB |
				    HV_PGSZ_MASK_16GB) &
				   cpu_pgsz_mask);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}

void __cpuinit sun4v_ktsb_register(void)
{
	unsigned long pa, ret;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
	if (ret != 0) {
		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
			    "errors with %lx\n", pa, ret);
		prom_halt();
	}
}

static void __init sun4u_linear_pte_xor_finalize(void)
{
#ifndef CONFIG_DEBUG_PAGEALLOC
	/* This is where we would add Panther support for
	 * 32MB and 256MB pages.
	 */
#endif
}

static void __init sun4v_linear_pte_xor_finalize(void)
{
#ifndef CONFIG_DEBUG_PAGEALLOC
	if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
		kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
			0xfffff80000000000UL;
		kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
					   _PAGE_P_4V | _PAGE_W_4V);
	} else {
		kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
	}

	if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
		kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
			0xfffff80000000000UL;
		kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
					   _PAGE_P_4V | _PAGE_W_4V);
	} else {
		kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
	}

	if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
		kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
			0xfffff80000000000UL;
		kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
					   _PAGE_P_4V | _PAGE_W_4V);
	} else {
		kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
	}
#endif
}

/* paging_init() sets up the page tables.  */

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

void __init paging_init(void)
{
	unsigned long end_pfn, shift, phys_base;
	unsigned long real_end, i;
	int node;

	/* These build-time checks make sure that the dcache_dirty_cpu()
	 * page->flags usage will work: the dirtying cpu number is stored
	 * starting at bit 32 of page->flags, so all of the other flag
	 * users must fit in the low 32 bits.
	 */
	BUILD_BUG_ON(NR_PAGEFLAGS > 32);

	/* The cpu number, together with the section, node and zone
	 * information kept in the upper half of page->flags, must fit
	 * in the remaining 32 bits.
	 */
	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
		     ilog2(roundup_pow_of_two(NR_CPUS)) > 32);

	BUILD_BUG_ON(NR_CPUS > 4096);

	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	/* Invalidate both kernel TSBs.  */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor) {
		tsb_phys_patch();
		ktsb_phys_patch();
	}

	if (tlb_type == hypervisor)
		sun4v_patch_tlb_handlers();

	/* Find available physical memory.
	 *
	 * Read the "available" property twice in order to work around
	 * a bug in openfirmware: the call to grab this table itself
	 * can cause openfirmware to allocate memory, which in turn
	 * takes away some of this memory.
	 */
	read_obp_translations();
	read_obp_memory("reg", &pall[0], &pall_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++) {
		phys_base = min(phys_base, pavail[i].phys_addr);
		memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
	}

	memblock_reserve(kern_base, kern_size);

	find_ramdisk(phys_base);

	memblock_enforce_memory_limit(cmdline_memory_size);

	memblock_allow_resize();
	memblock_dump_all();

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
	       num_kernel_image_mappings);

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	inherit_prom_mappings();

	init_kpte_bitmap();

	/* Ok, we can use our TLB miss and window trap handlers safely.  */
	setup_tba();

	__flush_tlb_all();

	prom_build_devicetree();
	of_populate_present_mask();
#ifndef CONFIG_SMP
	of_fill_in_cpu_data();
#endif

	if (tlb_type == hypervisor) {
		sun4v_mdesc_init();
		mdesc_populate_present_mask(cpu_all_mask);
#ifndef CONFIG_SMP
		mdesc_fill_in_cpu_data(cpu_all_mask);
#endif
		mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);

		sun4v_linear_pte_xor_finalize();

		sun4v_ktsb_init();
		sun4v_ktsb_register();
	} else {
		unsigned long impl, ver;

		cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
				 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);

		__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
		impl = ((ver >> 32) & 0xffff);
		if (impl == PANTHER_IMPL)
			cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
					  HV_PGSZ_MASK_256MB);

		sun4u_linear_pte_xor_finalize();
	}

	/* Flush the TLBs and the 4M TSB so that the updated linear
	 * kernel page table state is visible to all.
	 */
	__flush_tlb_all();
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif
	__flush_tlb_all();

	/* Setup bootmem... */
	last_valid_pfn = end_pfn = bootmem_init(phys_base);

	/* Once the OF device tree and MDESC have been setup, we know
	 * the list of possible cpus.  Therefore we can allocate the
	 * IRQ stacks.
	 */
	for_each_possible_cpu(i) {
		node = cpu_to_node(i);

		softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
							THREAD_SIZE,
							THREAD_SIZE, 0);
		hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
							THREAD_SIZE,
							THREAD_SIZE, 0);
	}

	kernel_physical_mapping_init();

	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

		max_zone_pfns[ZONE_NORMAL] = end_pfn;

		free_area_init_nodes(max_zone_pfns);
	}

	printk("Booting Linux...\n");
}

int page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif

	return 0;
}

static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_rescan_ents __initdata;

/* Certain OBP calls, such as fetching "available" properties, can
 * claim physical memory.  So, compare the second read of the
 * "available" property against the first: every page from the first
 * view that is still covered gets marked valid in the bitmap, and if
 * memory has simply vanished we cannot safely continue.
 */
static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
{
	int i;

	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

	for (i = 0; i < pavail_ents; i++) {
		unsigned long old_start, old_end;

		old_start = pavail[i].phys_addr;
		old_end = old_start + pavail[i].reg_size;
		while (old_start < old_end) {
			int n;

			for (n = 0; n < pavail_rescan_ents; n++) {
				unsigned long new_start, new_end;

				new_start = pavail_rescan[n].phys_addr;
				new_end = new_start +
					pavail_rescan[n].reg_size;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit(old_start >> 22, bitmap);
					goto do_next_page;
				}
			}

			prom_printf("mem_init: Lost memory in pavail\n");
			prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
				    pavail[i].phys_addr,
				    pavail[i].reg_size);
			prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
				    pavail_rescan[i].phys_addr,
				    pavail_rescan[i].reg_size);
			prom_printf("mem_init: Cannot continue, aborting.\n");
			prom_halt();

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}

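/* Enable the valid_addr_bitmap test in the TLB miss handlers now that
 * the bitmap is fully built.  The two instructions are patched in
 * reverse order with a barrier in between, so a concurrent miss never
 * executes a half-patched sequence.
 */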
static void __init patch_tlb_miss_handler_bitmap(void)
{
	extern unsigned int valid_addr_bitmap_insn[];
	extern unsigned int valid_addr_bitmap_patch[];

	valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
	mb();
	valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
	flushi(&valid_addr_bitmap_insn[0]);
}

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int i;

	for_each_online_node(i)
		if (NODE_DATA(i)->node_spanned_pages)
			register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
	patch_tlb_miss_handler_bitmap();

	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

	register_page_bootmem_info();
	totalram_pages = free_all_bootmem();

	/* We subtract one to account for the mem_map_zero page
	 * allocated below.
	 */
	num_physpages = totalram_pages - 1;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	mark_page_reserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}

void free_initmem(void)
{
	unsigned long addr, initend;
	int do_free = 1;

	/* If the physical memory maps were trimmed by kernel command
	 * line options, don't even try freeing this initmem stuff up.
	 * The kernel image could have been in the trimmed out region
	 * and if so the freeing below will free invalid page structs.
	 */
	if (cmdline_memory_size)
		do_free = 0;

	/* The init section is aligned to 8k in vmlinux.lds.  Page align
	 * for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);

		if (do_free)
			free_reserved_page(virt_to_page(page));
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	num_physpages += free_reserved_area(start, end, POISON_FREE_INITMEM,
					    "initrd");
}
#endif

#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	 (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)

pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
unsigned long vmemmap_table[VMEMMAP_SIZE];

static long __meminitdata addr_start, addr_end;
static int __meminitdata node_start;

int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
			       int node)
{
	unsigned long phys_start = (vstart - VMEMMAP_BASE);
	unsigned long phys_end = (vend - VMEMMAP_BASE);
	unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
	unsigned long end = VMEMMAP_ALIGN(phys_end);
	unsigned long pte_base;

	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
		    _PAGE_CP_4U | _PAGE_CV_4U |
		    _PAGE_P_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
			    _PAGE_CP_4V | _PAGE_CV_4V |
			    _PAGE_P_4V | _PAGE_W_4V);

	for (; addr < end; addr += VMEMMAP_CHUNK) {
		unsigned long *vmem_pp =
			vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
		void *block;

		if (!(*vmem_pp & _PAGE_VALID)) {
			block = vmemmap_alloc_block(1UL << 22, node);
			if (!block)
				return -ENOMEM;

			*vmem_pp = pte_base | __pa(block);

			/* check to see if we have contiguous blocks */
			if (addr_end != addr || node_start != node) {
				if (addr_start)
					printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
					       addr_start, addr_end-1, node_start);
				addr_start = addr;
				node_start = node;
			}
			addr_end = addr + VMEMMAP_CHUNK;
		}
	}
	return 0;
}

void __meminit vmemmap_populate_print_last(void)
{
	if (addr_start) {
		printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
		       addr_start, addr_end-1, node_start);
		addr_start = 0;
		addr_end = 0;
		node_start = 0;
	}
}

void vmemmap_free(unsigned long start, unsigned long end)
{
}

#endif /* CONFIG_SPARSEMEM_VMEMMAP */

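/* Fill in protection_map[] from the four basic protections.  The low
 * three index bits are VM_READ, VM_WRITE and VM_EXEC; bit 3 selects
 * the VM_SHARED variants.  For example, entry 0xb (shared+write+read)
 * gets page_shared with the execute bit masked off.
 */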
static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}

static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;
	int i;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				_PAGE_CACHE_4U | _PAGE_P_4U |
				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
				_PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				       _PAGE_CACHE_4U | _PAGE_P_4U |
				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				       _PAGE_EXEC_4U | _PAGE_L_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	for (i = 1; i < 4; i++)
		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];

	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
2312
static void __init sun4v_pgprot_init(void)
{
    unsigned long page_none, page_shared, page_copy, page_readonly;
    unsigned long page_exec_bit;
    int i;

    PAGE_KERNEL = __pgprot(_PAGE_PRESENT_4V | _PAGE_VALID |
                           _PAGE_CACHE_4V | _PAGE_P_4V |
                           __ACCESS_BITS_4V | __DIRTY_BITS_4V |
                           _PAGE_EXEC_4V);
    PAGE_KERNEL_LOCKED = PAGE_KERNEL;

    _PAGE_IE = _PAGE_IE_4V;
    _PAGE_E = _PAGE_E_4V;
    _PAGE_CACHE = _PAGE_CACHE_4V;

#ifdef CONFIG_DEBUG_PAGEALLOC
    kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL;
#else
    kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
        0xfffff80000000000UL;
#endif
    kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
                               _PAGE_P_4V | _PAGE_W_4V);

    for (i = 1; i < 4; i++)
        kern_linear_pte_xor[i] = kern_linear_pte_xor[0];

    pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
                 __ACCESS_BITS_4V | _PAGE_E_4V);

    _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
                         _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
                         _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
                         _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

    page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
    page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
                   __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
    page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
                 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
    page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
                     __ACCESS_BITS_4V | _PAGE_EXEC_4V);

    page_exec_bit = _PAGE_EXEC_4V;

    prot_init_common(page_none, page_shared, page_copy, page_readonly,
                     page_exec_bit);
}

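/*
 * Translate a page size in bytes into the corresponding TTE size
 * field for the running TLB type; unknown sizes fall back to the
 * base 8K encoding.  E.g. pte_sz_bits(4 * 1024 * 1024) returns
 * _PAGE_SZ4MB_4V on sun4v and _PAGE_SZ4MB_4U otherwise.
 */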
unsigned long pte_sz_bits(unsigned long sz)
{
    if (tlb_type == hypervisor) {
        switch (sz) {
        case 8 * 1024:
        default:
            return _PAGE_SZ8K_4V;
        case 64 * 1024:
            return _PAGE_SZ64K_4V;
        case 512 * 1024:
            return _PAGE_SZ512K_4V;
        case 4 * 1024 * 1024:
            return _PAGE_SZ4MB_4V;
        }
    } else {
        switch (sz) {
        case 8 * 1024:
        default:
            return _PAGE_SZ8K_4U;
        case 64 * 1024:
            return _PAGE_SZ64K_4U;
        case 512 * 1024:
            return _PAGE_SZ512K_4U;
        case 4 * 1024 * 1024:
            return _PAGE_SZ4MB_4U;
        }
    }
}

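/*
 * Build a PTE for a physical I/O mapping: non-cacheable protections,
 * the I/O space identifier placed above bit 32, and the size field
 * for the requested page size.
 */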
pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
    pte_t pte;

    pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
    pte_val(pte) |= (((unsigned long)space) << 32);
    pte_val(pte) |= pte_sz_bits(page_size);

    return pte;
}

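/*
 * Construct the 4MB TTE used for the kernel's linear mapping of
 * @paddr: cacheable, privileged, writable and executable.  On sun4u
 * the entry is additionally locked into the TLB via _PAGE_L_4U; the
 * sun4v variant omits the lock bit, which that format does not have.
 */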
static unsigned long kern_large_tte(unsigned long paddr)
{
    unsigned long val;

    val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
           _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
           _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
    if (tlb_type == hypervisor)
        val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
               _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
               _PAGE_EXEC_4V | _PAGE_W_4V);

    return val | paddr;
}

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
    unsigned long pstate;
    int i;

    /* Save %pstate and disable interrupts while the TLBs are walked. */
    __asm__ __volatile__("flushw\n\t"
                         "rdpr %%pstate, %0\n\t"
                         "wrpr %0, %1, %%pstate"
                         : "=r" (pstate)
                         : "i" (PSTATE_IE));
    if (tlb_type == hypervisor) {
        sun4v_mmu_demap_all();
    } else if (tlb_type == spitfire) {
        for (i = 0; i < 64; i++) {
            /* Spitfire Errata #32 workaround.  This code path
             * only runs on spitfire, so there is no need to
             * check spitfire_errata32() first.
             */
            __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                                 "flush %%g6"
                                 : /* no outputs */
                                 : "r" (0),
                                 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

            if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                     "membar #Sync"
                                     : /* no outputs */
                                     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
                spitfire_put_dtlb_data(i, 0x0UL);
            }

            /* Spitfire Errata #32 workaround, again, before
             * touching the ITLB entry.
             */
            __asm__ __volatile__("stxa %0, [%1] %2\n\t"
                                 "flush %%g6"
                                 : /* no outputs */
                                 : "r" (0),
                                 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

            if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                     "membar #Sync"
                                     : /* no outputs */
                                     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
                spitfire_put_itlb_data(i, 0x0UL);
            }
        }
    } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
        cheetah_flush_dtlb_all();
        cheetah_flush_itlb_all();
    }
    __asm__ __volatile__("wrpr %0, 0, %%pstate"
                         : : "r" (pstate));
}

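/*
 * PTE tables on sparc64 occupy half a page, so a single page can back
 * two tables.  __alloc_for_cache() allocates a fresh page, sets its
 * refcount to two, and parks it in mm->context.pgtable_page so that
 * get_from_cache() can hand out the second half on the next
 * allocation without going back to the page allocator.
 */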
static pte_t *get_from_cache(struct mm_struct *mm)
{
    struct page *page;
    pte_t *ret;

    spin_lock(&mm->page_table_lock);
    page = mm->context.pgtable_page;
    ret = NULL;
    if (page) {
        void *p = page_address(page);

        mm->context.pgtable_page = NULL;

        /* Hand out the second half of the cached page. */
        ret = (pte_t *) (p + (PAGE_SIZE / 2));
    }
    spin_unlock(&mm->page_table_lock);

    return ret;
}

static struct page *__alloc_for_cache(struct mm_struct *mm)
{
    struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
                                   __GFP_REPEAT | __GFP_ZERO);

    if (page) {
        spin_lock(&mm->page_table_lock);
        if (!mm->context.pgtable_page) {
            /* One reference for each half of the page. */
            atomic_set(&page->_count, 2);
            mm->context.pgtable_page = page;
        }
        spin_unlock(&mm->page_table_lock);
    }
    return page;
}

pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                            unsigned long address)
{
    struct page *page;
    pte_t *pte;

    pte = get_from_cache(mm);
    if (pte)
        return pte;

    page = __alloc_for_cache(mm);
    if (page)
        pte = (pte_t *) page_address(page);

    return pte;
}

pgtable_t pte_alloc_one(struct mm_struct *mm,
                        unsigned long address)
{
    struct page *page;
    pte_t *pte;

    pte = get_from_cache(mm);
    if (pte)
        return pte;

    page = __alloc_for_cache(mm);
    if (page) {
        pgtable_page_ctor(page);
        pte = (pte_t *) page_address(page);
    }

    return pte;
}

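/*
 * Freeing drops one reference on the backing page; the page itself
 * only goes back to the allocator once both pte table halves have
 * been released.
 */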
void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
    struct page *page = virt_to_page(pte);

    if (put_page_testzero(page))
        free_hot_cold_page(page, 0);
}

static void __pte_free(pgtable_t pte)
{
    struct page *page = virt_to_page(pte);

    if (put_page_testzero(page)) {
        pgtable_page_dtor(page);
        free_hot_cold_page(page, 0);
    }
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
    __pte_free(pte);
}

void pgtable_free(void *table, bool is_page)
{
    if (is_page)
        __pte_free(table);
    else
        kmem_cache_free(pgtable_cache, table);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
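/*
 * Huge PMDs store a compressed PMD_HUGE_* encoding of the protection
 * bits rather than a full TTE.  This helper translates a pgprot into
 * that encoding; for pmd_modify() (@for_modify == true) the accessed
 * and dirty bits are left untouched so the entry's existing state is
 * preserved.
 */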
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot, bool for_modify)
{
    if (pgprot_val(pgprot) & _PAGE_VALID)
        pmd_val(pmd) |= PMD_HUGE_PRESENT;
    if (tlb_type == hypervisor) {
        if (pgprot_val(pgprot) & _PAGE_WRITE_4V)
            pmd_val(pmd) |= PMD_HUGE_WRITE;
        if (pgprot_val(pgprot) & _PAGE_EXEC_4V)
            pmd_val(pmd) |= PMD_HUGE_EXEC;

        if (!for_modify) {
            if (pgprot_val(pgprot) & _PAGE_ACCESSED_4V)
                pmd_val(pmd) |= PMD_HUGE_ACCESSED;
            if (pgprot_val(pgprot) & _PAGE_MODIFIED_4V)
                pmd_val(pmd) |= PMD_HUGE_DIRTY;
        }
    } else {
        if (pgprot_val(pgprot) & _PAGE_WRITE_4U)
            pmd_val(pmd) |= PMD_HUGE_WRITE;
        if (pgprot_val(pgprot) & _PAGE_EXEC_4U)
            pmd_val(pmd) |= PMD_HUGE_EXEC;

        if (!for_modify) {
            if (pgprot_val(pgprot) & _PAGE_ACCESSED_4U)
                pmd_val(pmd) |= PMD_HUGE_ACCESSED;
            if (pgprot_val(pgprot) & _PAGE_MODIFIED_4U)
                pmd_val(pmd) |= PMD_HUGE_DIRTY;
        }
    }

    return pmd;
}

pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
    pmd_t pmd;

    pmd_val(pmd) = page_nr << (PAGE_SHIFT - PMD_PADDR_SHIFT);
    pmd_val(pmd) |= PMD_ISHUGE;
    pmd = pmd_set_protbits(pmd, pgprot, false);
    return pmd;
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
    pmd_val(pmd) &= ~(PMD_HUGE_PRESENT |
                      PMD_HUGE_WRITE |
                      PMD_HUGE_EXEC);
    pmd = pmd_set_protbits(pmd, newprot, true);
    return pmd;
}

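/*
 * The inverse of pmd_set_protbits(): expand the compressed PMD_HUGE_*
 * bits back into a full pgprot in the TTE format of the running TLB
 * type, always adding the cacheability bits.
 */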
pgprot_t pmd_pgprot(pmd_t entry)
{
    unsigned long pte = 0;

    if (pmd_val(entry) & PMD_HUGE_PRESENT)
        pte |= _PAGE_VALID;

    if (tlb_type == hypervisor) {
        if (pmd_val(entry) & PMD_HUGE_PRESENT)
            pte |= _PAGE_PRESENT_4V;
        if (pmd_val(entry) & PMD_HUGE_EXEC)
            pte |= _PAGE_EXEC_4V;
        if (pmd_val(entry) & PMD_HUGE_WRITE)
            pte |= _PAGE_W_4V;
        if (pmd_val(entry) & PMD_HUGE_ACCESSED)
            pte |= _PAGE_ACCESSED_4V;
        if (pmd_val(entry) & PMD_HUGE_DIRTY)
            pte |= _PAGE_MODIFIED_4V;
        pte |= _PAGE_CP_4V | _PAGE_CV_4V;
    } else {
        if (pmd_val(entry) & PMD_HUGE_PRESENT)
            pte |= _PAGE_PRESENT_4U;
        if (pmd_val(entry) & PMD_HUGE_EXEC)
            pte |= _PAGE_EXEC_4U;
        if (pmd_val(entry) & PMD_HUGE_WRITE)
            pte |= _PAGE_W_4U;
        if (pmd_val(entry) & PMD_HUGE_ACCESSED)
            pte |= _PAGE_ACCESSED_4U;
        if (pmd_val(entry) & PMD_HUGE_DIRTY)
            pte |= _PAGE_MODIFIED_4U;
        pte |= _PAGE_CP_4U | _PAGE_CV_4U;
    }

    return __pgprot(pte);
}

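/*
 * Called after a huge PMD has been established: reconstruct the TTE
 * (physical address, protections and huge page size field) from the
 * PMD and pre-load it into the huge-page TSB so the next access does
 * not have to take the TSB-miss path.
 */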
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                          pmd_t *pmd)
{
    unsigned long pte, flags;
    struct mm_struct *mm;
    pmd_t entry = *pmd;
    pgprot_t prot;

    if (!pmd_large(entry) || !pmd_young(entry))
        return;

    pte = (pmd_val(entry) & ~PMD_HUGE_PROTBITS);
    pte <<= PMD_PADDR_SHIFT;
    pte |= _PAGE_VALID;

    prot = pmd_pgprot(entry);

    if (tlb_type == hypervisor)
        pgprot_val(prot) |= _PAGE_SZHUGE_4V;
    else
        pgprot_val(prot) |= _PAGE_SZHUGE_4U;

    pte |= pgprot_val(prot);

    mm = vma->vm_mm;

    spin_lock_irqsave(&mm->context.lock, flags);

    if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
        __update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
                                addr, pte);

    spin_unlock_irqrestore(&mm->context.lock, flags);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static void context_reload(void *__data)
{
    struct mm_struct *mm = __data;

    if (mm == current->mm)
        load_secondary_context(mm);
}

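/*
 * First-touch setup for huge pages in this address space: allocate
 * the huge-page TSB if it does not exist yet and, on cheetah_plus,
 * program the context register page-size fields.  This must not run
 * in atomic context; faults from atomic context are redirected
 * through the exception tables instead.
 */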
void hugetlb_setup(struct pt_regs *regs)
{
    struct mm_struct *mm = current->mm;
    struct tsb_config *tp;

    if (in_atomic() || !mm) {
        const struct exception_table_entry *entry;

        entry = search_exception_tables(regs->tpc);
        if (entry) {
            regs->tpc = entry->fixup;
            regs->tnpc = regs->tpc + 4;
            return;
        }
        pr_alert("Unexpected HugeTLB setup in atomic context.\n");
        die_if_kernel("HugeTSB in atomic", regs);
    }

    tp = &mm->context.tsb_block[MM_TSB_HUGE];
    if (likely(tp->tsb == NULL))
        tsb_grow(mm, MM_TSB_HUGE, 0);

    tsb_context_switch(mm);
    smp_tsb_sync(mm);

    /* On UltraSPARC-III+ and later, configure the second half of
     * the Data-TLB for huge pages.
     */
    if (tlb_type == cheetah_plus) {
        unsigned long ctx;

        spin_lock(&ctx_alloc_lock);
        ctx = mm->context.sparc64_ctx_val;
        ctx &= ~CTX_PGSZ_MASK;
        ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
        ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

        if (ctx != mm->context.sparc64_ctx_val) {
            /* When changing the page size fields, we
             * must perform a context flush so that no
             * stale entries match.  This flush must
             * occur with the original context register
             * settings.
             */
            do_flush_tlb_mm(mm);

            /* Reload the context register of all processors
             * also executing in this address space.
             */
            mm->context.sparc64_ctx_val = ctx;
            on_each_cpu(context_reload, mm, 0);
        }
        spin_unlock(&ctx_alloc_lock);
    }
}
#endif /* CONFIG_HUGETLB_PAGE || CONFIG_TRANSPARENT_HUGEPAGE */
