linux/arch/sparc/mm/init_64.c
/*
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/irq.h>

#include "init_64.h"

unsigned long kern_linear_pte_xor[4] __read_mostly;

/* A bitmap, two bits for every 256MB of physical memory.  These two
 * bits determine what page size we use for kernel linear
 * translations.  They form an index into kern_linear_pte_xor[].  The
 * value in the indexed slot is XOR'd with the TLB miss virtual
 * address to form the resulting TTE.  The mapping is:
 *
 *      0       ==>     4MB
 *      1       ==>     256MB
 *      2       ==>     2GB
 *      3       ==>     16GB
 *
 * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
 * support 2GB pages, and hopefully future cpus will support the 16GB
 * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
 * if these larger page sizes are not supported by the cpu.
 *
 * It would be nice to determine this from the machine description
 * 'cpu' properties, but we need to have this table set up before the
 * MDESC is initialized.
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
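
/* Illustration (not in the original source): the bitmap is indexed by
 * 256MB chunk number, two bits per chunk, matching kpte_set_val()
 * further below.  For example, physical address 0x140000000 (5GB):
 *
 *      index = 0x140000000 >> 28                 = 20 (256MB chunk)
 *      word  = index / (BITS_PER_LONG / 2)       = kpte_linear_bitmap[0]
 *      bit   = (index % (BITS_PER_LONG / 2)) * 2 = bits 40-41
 *
 * A value of 2 in those two bits would send TLB misses in that chunk
 * through kern_linear_pte_xor[2] (the 2GB TTE xor).
 */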

#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
 * Space is allocated for this right after the trap table in
 * arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

static unsigned long cpu_pgsz_mask;

#define MAX_BANKS       32

static struct linux_prom64_registers pavail[MAX_BANKS];
static int pavail_ents;

static int cmp_p64(const void *a, const void *b)
{
        const struct linux_prom64_registers *x = a, *y = b;

        if (x->phys_addr > y->phys_addr)
                return 1;
        if (x->phys_addr < y->phys_addr)
                return -1;
        return 0;
}

static void __init read_obp_memory(const char *property,
                                   struct linux_prom64_registers *regs,
                                   int *num_ents)
{
        phandle node = prom_finddevice("/memory");
        int prop_size = prom_getproplen(node, property);
        int ents, ret, i;

        ents = prop_size / sizeof(struct linux_prom64_registers);
        if (ents > MAX_BANKS) {
                prom_printf("The machine has more %s property entries than "
                            "this kernel can support (%d).\n",
                            property, MAX_BANKS);
                prom_halt();
        }

        ret = prom_getproperty(node, property, (char *) regs, prop_size);
        if (ret == -1) {
                prom_printf("Couldn't get %s property from /memory.\n",
                                property);
                prom_halt();
        }

        /* Sanitize what we got from the firmware, by page aligning
         * everything.
         */
        for (i = 0; i < ents; i++) {
                unsigned long base, size;

                base = regs[i].phys_addr;
                size = regs[i].reg_size;

                size &= PAGE_MASK;
                if (base & ~PAGE_MASK) {
                        unsigned long new_base = PAGE_ALIGN(base);

                        size -= new_base - base;
                        if ((long) size < 0L)
                                size = 0UL;
                        base = new_base;
                }
                if (size == 0UL) {
                        /* If it is empty, simply get rid of it.
                         * This simplifies the logic of the other
                         * functions that process these arrays.
                         */
                        memmove(&regs[i], &regs[i + 1],
                                (ents - i - 1) * sizeof(regs[0]));
                        i--;
                        ents--;
                        continue;
                }
                regs[i].phys_addr = base;
                regs[i].reg_size = size;
        }

        *num_ents = ents;

        sort(regs, ents, sizeof(struct linux_prom64_registers),
             cmp_p64, NULL);
}

unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
                                        sizeof(unsigned long)];
EXPORT_SYMBOL(sparc64_valid_addr_bitmap);

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
        BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
        __flush_dcache_page(page_address(page),
                            ((tlb_type == spitfire) &&
                             page_mapping(page) != NULL));
#else
        if (page_mapping(page) != NULL &&
            tlb_type == spitfire)
                __flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty         PG_arch_1
#define PG_dcache_cpu_shift     32UL
#define PG_dcache_cpu_mask      \
        ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
        (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
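
/* Illustration (not in the original source): the dcache state lives in
 * the upper half of page->flags.  Bit PG_dcache_dirty (PG_arch_1) says
 * the page has dirty D-cache lines, and the cpu that dirtied them is
 * stored starting at bit 32.  E.g. assuming NR_CPUS == 64, the mask is
 * 0x3f and the owning cpu id occupies flags bits 32-37.
 */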

static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
        unsigned long mask = this_cpu;
        unsigned long non_cpu_bits;

        non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
        mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

        __asm__ __volatile__("1:\n\t"
                             "ldx       [%2], %%g7\n\t"
                             "and       %%g7, %1, %%g1\n\t"
                             "or        %%g1, %0, %%g1\n\t"
                             "casx      [%2], %%g7, %%g1\n\t"
                             "cmp       %%g7, %%g1\n\t"
                             "bne,pn    %%xcc, 1b\n\t"
                             " nop"
                             : /* no outputs */
                             : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
                             : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
        unsigned long mask = (1UL << PG_dcache_dirty);

        __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
                             "1:\n\t"
                             "ldx       [%2], %%g7\n\t"
                             "srlx      %%g7, %4, %%g1\n\t"
                             "and       %%g1, %3, %%g1\n\t"
                             "cmp       %%g1, %0\n\t"
                             "bne,pn    %%icc, 2f\n\t"
                             " andn     %%g7, %1, %%g1\n\t"
                             "casx      [%2], %%g7, %%g1\n\t"
                             "cmp       %%g7, %%g1\n\t"
                             "bne,pn    %%xcc, 1b\n\t"
                             " nop\n"
                             "2:"
                             : /* no outputs */
                             : "r" (cpu), "r" (mask), "r" (&page->flags),
                               "i" (PG_dcache_cpu_mask),
                               "i" (PG_dcache_cpu_shift)
                             : "g1", "g7");
}

static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
        unsigned long tsb_addr = (unsigned long) ent;

        if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                tsb_addr = __pa(tsb_addr);

        __tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;

static void flush_dcache(unsigned long pfn)
{
        struct page *page;

        page = pfn_to_page(pfn);
        if (page) {
                unsigned long pg_flags;

                pg_flags = page->flags;
                if (pg_flags & (1UL << PG_dcache_dirty)) {
                        int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
                                   PG_dcache_cpu_mask);
                        int this_cpu = get_cpu();

                        /* This is just to optimize away some function calls
                         * in the SMP case.
                         */
                        if (cpu == this_cpu)
                                flush_dcache_page_impl(page);
                        else
                                smp_flush_dcache_page_impl(page, cpu);

                        clear_dcache_dirty_cpu(page, cpu);

                        put_cpu();
                }
        }
}

/* mm->context.lock must be held */
static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
                                    unsigned long tsb_hash_shift, unsigned long address,
                                    unsigned long tte)
{
        struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
        unsigned long tag;

        if (unlikely(!tsb))
                return;

        tsb += ((address >> tsb_hash_shift) &
                (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
        tag = (address >> 22UL);
        tsb_insert(tsb, tag, tte);
}
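
/* Illustration (not in the original source): for the base TSB, assuming
 * 8K pages (PAGE_SHIFT == 13) and tsb_nentries == 512, virtual address
 * 0x400000 selects entry (0x400000 >> 13) & 511 == 0 and is stored
 * with tag 0x400000 >> 22 == 1.
 */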

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline bool is_hugetlb_pte(pte_t pte)
{
        if ((tlb_type == hypervisor &&
             (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
            (tlb_type != hypervisor &&
             (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
                return true;
        return false;
}
#endif

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
        struct mm_struct *mm;
        unsigned long flags;
        pte_t pte = *ptep;

        if (tlb_type != hypervisor) {
                unsigned long pfn = pte_pfn(pte);

                if (pfn_valid(pfn))
                        flush_dcache(pfn);
        }

        mm = vma->vm_mm;

        spin_lock_irqsave(&mm->context.lock, flags);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
                __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
                                        address, pte_val(pte));
        else
#endif
                __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
                                        address, pte_val(pte));

        spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;
        int this_cpu;

        if (tlb_type == hypervisor)
                return;

        /* Do not bother with the expensive D-cache flush if it
         * is merely the zero page.  The 'bigcore' testcase in GDB
         * causes this case to run millions of times.
         */
        if (page == ZERO_PAGE(0))
                return;

        this_cpu = get_cpu();

        mapping = page_mapping(page);
        if (mapping && !mapping_mapped(mapping)) {
                int dirty = test_bit(PG_dcache_dirty, &page->flags);
                if (dirty) {
                        int dirty_cpu = dcache_dirty_cpu(page);

                        if (dirty_cpu == this_cpu)
                                goto out;
                        smp_flush_dcache_page_impl(page, dirty_cpu);
                }
                set_dcache_dirty(page, this_cpu);
        } else {
                /* We could delay the flush for the !page_mapping
                 * case too, but that case covers exec env/arg pages,
                 * which are 99% certain to be faulted into the TLB
                 * (and thus flushed) anyway.
                 */
                flush_dcache_page_impl(page);
        }

out:
        put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
        /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
        if (tlb_type == spitfire) {
                unsigned long kaddr;

                /* This code only runs on Spitfire cpus so this is
                 * why we can assume _PAGE_PADDR_4U.
                 */
                for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
                        unsigned long paddr, mask = _PAGE_PADDR_4U;

                        if (kaddr >= PAGE_OFFSET)
                                paddr = kaddr & mask;
                        else {
                                pgd_t *pgdp = pgd_offset_k(kaddr);
                                pud_t *pudp = pud_offset(pgdp, kaddr);
                                pmd_t *pmdp = pmd_offset(pudp, kaddr);
                                pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

                                paddr = pte_val(*ptep) & mask;
                        }
                        __flush_icache_page(paddr);
                }
        }
}
EXPORT_SYMBOL(flush_icache_range);

void mmu_info(struct seq_file *m)
{
        static const char *pgsz_strings[] = {
                "8K", "64K", "512K", "4MB", "32MB",
                "256MB", "2GB", "16GB",
        };
        int i, printed;

        if (tlb_type == cheetah)
                seq_printf(m, "MMU Type\t: Cheetah\n");
        else if (tlb_type == cheetah_plus)
                seq_printf(m, "MMU Type\t: Cheetah+\n");
        else if (tlb_type == spitfire)
                seq_printf(m, "MMU Type\t: Spitfire\n");
        else if (tlb_type == hypervisor)
                seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
        else
                seq_printf(m, "MMU Type\t: ???\n");

        seq_printf(m, "MMU PGSZs\t: ");
        printed = 0;
        for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
                if (cpu_pgsz_mask & (1UL << i)) {
                        seq_printf(m, "%s%s",
                                   printed ? "," : "", pgsz_strings[i]);
                        printed++;
                }
        }
        seq_putc(m, '\n');

#ifdef CONFIG_DEBUG_DCFLUSH
        seq_printf(m, "DCPageFlushes\t: %d\n",
                   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
        seq_printf(m, "DCPageFlushesXC\t: %d\n",
                   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
        return (vaddr >= LOW_OBP_ADDRESS &&
                vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
        const struct linux_prom_translation *x = a, *y = b;

        if (x->virt > y->virt)
                return 1;
        if (x->virt < y->virt)
                return -1;
        return 0;
}

/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
        int n, node, ents, first, last, i;

        node = prom_finddevice("/virtual-memory");
        n = prom_getproplen(node, "translations");
        if (unlikely(n == 0 || n == -1)) {
                prom_printf("prom_mappings: Couldn't get size.\n");
                prom_halt();
        }
        if (unlikely(n > sizeof(prom_trans))) {
                prom_printf("prom_mappings: Size %d is too big.\n", n);
                prom_halt();
        }

        if ((n = prom_getproperty(node, "translations",
                                  (char *)&prom_trans[0],
                                  sizeof(prom_trans))) == -1) {
                prom_printf("prom_mappings: Couldn't get property.\n");
                prom_halt();
        }

        n = n / sizeof(struct linux_prom_translation);

        ents = n;

        sort(prom_trans, ents, sizeof(struct linux_prom_translation),
             cmp_ptrans, NULL);

        /* Now kick out all the non-OBP entries.  */
        for (i = 0; i < ents; i++) {
                if (in_obp_range(prom_trans[i].virt))
                        break;
        }
        first = i;
        for (; i < ents; i++) {
                if (!in_obp_range(prom_trans[i].virt))
                        break;
        }
        last = i;

        for (i = 0; i < (last - first); i++) {
                struct linux_prom_translation *src = &prom_trans[i + first];
                struct linux_prom_translation *dest = &prom_trans[i];

                *dest = *src;
        }
        for (; i < ents; i++) {
                struct linux_prom_translation *dest = &prom_trans[i];
                dest->virt = dest->size = dest->data = 0x0UL;
        }

        prom_trans_ents = last - first;

        if (tlb_type == spitfire) {
                /* Clear diag TTE bits. */
                for (i = 0; i < prom_trans_ents; i++)
                        prom_trans[i].data &= ~0x0003fe0000000000UL;
        }

        /* Force execute bit on.  */
        for (i = 0; i < prom_trans_ents; i++)
                prom_trans[i].data |= (tlb_type == hypervisor ?
                                       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}

static void __init hypervisor_tlb_lock(unsigned long vaddr,
                                       unsigned long pte,
                                       unsigned long mmu)
{
        unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

        if (ret != 0) {
                prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
                            "errors with %lx\n", vaddr, 0, pte, mmu, ret);
                prom_halt();
        }
}

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
        unsigned long phys_page, tte_vaddr, tte_data;
        int i, tlb_ent = sparc64_highest_locked_tlbent();

        tte_vaddr = (unsigned long) KERNBASE;
        phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
        tte_data = kern_large_tte(phys_page);

        kern_locked_tte_data = tte_data;

        /* Now lock us into the TLBs via Hypervisor or OBP. */
        if (tlb_type == hypervisor) {
                for (i = 0; i < num_kernel_image_mappings; i++) {
                        hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
                        hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
                        tte_vaddr += 0x400000;
                        tte_data += 0x400000;
                }
        } else {
                for (i = 0; i < num_kernel_image_mappings; i++) {
                        prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
                        prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
                        tte_vaddr += 0x400000;
                        tte_data += 0x400000;
                }
                sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
        }
        if (tlb_type == cheetah_plus) {
                sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
                                            CTX_CHEETAH_PLUS_NUC);
                sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
                sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
        }
}


static void __init inherit_prom_mappings(void)
{
        /* Now fixup OBP's idea about where we really are mapped. */
        printk("Remapping the kernel... ");
        remap_kernel();
        printk("done.\n");
}

void prom_world(int enter)
{
        if (!enter)
                set_fs(get_fs());

        __asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
        unsigned long va;

        if (tlb_type == spitfire) {
                int n = 0;

                for (va = start; va < end; va += 32) {
                        spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
                        if (++n >= 512)
                                break;
                }
        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                start = __pa(start);
                end = __pa(end);
                for (va = start; va < end; va += 32)
                        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                             "membar #Sync"
                                             : /* no outputs */
                                             : "r" (va),
                                               "i" (ASI_DCACHE_INVALIDATE));
        }
}
EXPORT_SYMBOL(__flush_dcache_range);

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR      (1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS  BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) nor ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
        unsigned long ctx, new_ctx;
        unsigned long orig_pgsz_bits;
        int new_version;

        spin_lock(&ctx_alloc_lock);
        orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
        ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
        new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
        new_version = 0;
        if (new_ctx >= (1 << CTX_NR_BITS)) {
                new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
                if (new_ctx >= ctx) {
                        int i;
                        new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
                                CTX_FIRST_VERSION;
                        if (new_ctx == 1)
                                new_ctx = CTX_FIRST_VERSION;

                        /* Don't call memset, for 16 entries that's just
                         * plain silly...
                         */
                        mmu_context_bmap[0] = 3;
                        mmu_context_bmap[1] = 0;
                        mmu_context_bmap[2] = 0;
                        mmu_context_bmap[3] = 0;
                        for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
                                mmu_context_bmap[i + 0] = 0;
                                mmu_context_bmap[i + 1] = 0;
                                mmu_context_bmap[i + 2] = 0;
                                mmu_context_bmap[i + 3] = 0;
                        }
                        new_version = 1;
                        goto out;
                }
        }
        mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
        new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
        tlb_context_cache = new_ctx;
        mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
        spin_unlock(&ctx_alloc_lock);

        if (unlikely(new_version))
                smp_new_mmu_context_version();
}
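
/* Illustration (not in the original source): a context value packs a
 * version number above CTX_NR_BITS and the context number below it.
 * E.g. assuming CTX_NR_BITS == 13, context number 5 under version 2
 * is encoded as (2UL << 13) | 5.  When all context numbers are
 * exhausted, the version is bumped, which invalidates every context
 * handed out under older versions.
 */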

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        if (strstr(p, "debug"))
                numa_debug = 1;

        return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {    if (numa_debug) \
                printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
        if (sparc_ramdisk_image || sparc_ramdisk_image64) {
                unsigned long ramdisk_image;

                /* Older versions of the bootloader only supported a
                 * 32-bit physical address for the ramdisk image
                 * location, stored at sparc_ramdisk_image.  Newer
                 * SILO versions set sparc_ramdisk_image to zero and
                 * provide a full 64-bit physical address at
                 * sparc_ramdisk_image64.
                 */
                ramdisk_image = sparc_ramdisk_image;
                if (!ramdisk_image)
                        ramdisk_image = sparc_ramdisk_image64;

                /* Another bootloader quirk.  The bootloader normalizes
                 * the physical address to KERNBASE, so we have to
                 * factor that back out and add in the lowest valid
                 * physical page address to get the true physical address.
                 */
                ramdisk_image -= KERNBASE;
                ramdisk_image += phys_base;

                numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
                        ramdisk_image, sparc_ramdisk_size);

                initrd_start = ramdisk_image;
                initrd_end = ramdisk_image + sparc_ramdisk_size;

                memblock_reserve(initrd_start, sparc_ramdisk_size);

                initrd_start += PAGE_OFFSET;
                initrd_end += PAGE_OFFSET;
        }
#endif
}

struct node_mem_mask {
        unsigned long mask;
        unsigned long val;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mblock {
        u64     base;
        u64     size;
        u64     offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static unsigned long ra_to_pa(unsigned long addr)
{
        int i;

        for (i = 0; i < num_mblocks; i++) {
                struct mdesc_mblock *m = &mblocks[i];

                if (addr >= m->base &&
                    addr < (m->base + m->size)) {
                        addr += m->offset;
                        break;
                }
        }
        return addr;
}

static int find_node(unsigned long addr)
{
        int i;

        addr = ra_to_pa(addr);
        for (i = 0; i < num_node_masks; i++) {
                struct node_mem_mask *p = &node_masks[i];

                if ((addr & p->mask) == p->val)
                        return i;
        }
        return -1;
}

static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
        *nid = find_node(start);
        start += PAGE_SIZE;
        while (start < end) {
                int n = find_node(start);

                if (n != *nid)
                        break;
                start += PAGE_SIZE;
        }

        if (start > end)
                start = end;

        return start;
}
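
/* Illustration (not in the original source): memblock_nid_range()
 * walks [start, end) a page at a time and stops at the first address
 * whose node differs from that of 'start'.  If pages below 2GB were
 * on node 0 and pages above it on node 1, a scan of [1GB, 3GB) would
 * return 2GB with *nid == 0, letting add_node_ranges() split the
 * region at the node boundary.
 */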
#endif

/* This must be invoked after performing all of the necessary
 * memblock_set_node() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
        struct pglist_data *p;
        unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
        unsigned long paddr;

        paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
        if (!paddr) {
                prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
                prom_halt();
        }
        NODE_DATA(nid) = __va(paddr);
        memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

        NODE_DATA(nid)->node_id = nid;
#endif

        p = NODE_DATA(nid);

        get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
        p->node_start_pfn = start_pfn;
        p->node_spanned_pages = end_pfn - start_pfn;
}

static void init_node_masks_nonnuma(void)
{
        int i;

        numadbg("Initializing tables for non-numa.\n");

        node_masks[0].mask = node_masks[0].val = 0;
        num_node_masks = 1;

        for (i = 0; i < NR_CPUS; i++)
                numa_cpu_lookup_table[i] = 0;

        cpumask_setall(&numa_cpumask_lookup_table[0]);
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
        u64     node;
        u64     latency;
        u64     match;
        u64     mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
                                   u32 cfg_handle)
{
        u64 arc;

        mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
                u64 target = mdesc_arc_target(md, arc);
                const u64 *val;

                val = mdesc_get_property(md, target,
                                         "cfg-handle", NULL);
                if (val && *val == cfg_handle)
                        return 0;
        }
        return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
                                    u32 cfg_handle)
{
        u64 arc, candidate, best_latency = ~(u64)0;

        candidate = MDESC_NODE_NULL;
        mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
                u64 target = mdesc_arc_target(md, arc);
                const char *name = mdesc_node_name(md, target);
                const u64 *val;

                if (strcmp(name, "pio-latency-group"))
                        continue;

                val = mdesc_get_property(md, target, "latency", NULL);
                if (!val)
                        continue;

                if (*val < best_latency) {
                        candidate = target;
                        best_latency = *val;
                }
        }

        if (candidate == MDESC_NODE_NULL)
                return -ENODEV;

        return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
        const struct linux_prom64_registers *regs;
        struct mdesc_handle *md;
        u32 cfg_handle;
        int count, nid;
        u64 grp;

        /* This is the right thing to do on currently supported
         * SUN4U NUMA platforms as well, as the PCI controller does
         * not sit behind any particular memory controller.
         */
        if (!mlgroups)
                return -1;

        regs = of_get_property(dp, "reg", NULL);
        if (!regs)
                return -1;

        cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

        md = mdesc_grab();

        count = 0;
        nid = -1;
        mdesc_for_each_node_by_name(md, grp, "group") {
                if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
                        nid = count;
                        break;
                }
                count++;
        }

        mdesc_release(md);

        return nid;
}

static void __init add_node_ranges(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                unsigned long size = reg->size;
                unsigned long start, end;

                start = reg->base;
                end = start + size;
                while (start < end) {
                        unsigned long this_end;
                        int nid;

                        this_end = memblock_nid_range(start, end, &nid);

                        numadbg("Setting memblock NUMA node nid[%d] "
                                "start[%lx] end[%lx]\n",
                                nid, start, this_end);

                        memblock_set_node(start, this_end - start,
                                          &memblock.memory, nid);
                        start = this_end;
                }
        }
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
        unsigned long paddr;
        int count = 0;
        u64 node;

        mdesc_for_each_node_by_name(md, node, "memory-latency-group")
                count++;
        if (!count)
                return -ENOENT;

        paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
                          SMP_CACHE_BYTES);
        if (!paddr)
                return -ENOMEM;

        mlgroups = __va(paddr);
        num_mlgroups = count;

        count = 0;
        mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
                struct mdesc_mlgroup *m = &mlgroups[count++];
                const u64 *val;

                m->node = node;

                val = mdesc_get_property(md, node, "latency", NULL);
                m->latency = *val;
                val = mdesc_get_property(md, node, "address-match", NULL);
                m->match = *val;
                val = mdesc_get_property(md, node, "address-mask", NULL);
                m->mask = *val;

                numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
                        "match[%llx] mask[%llx]\n",
                        count - 1, m->node, m->latency, m->match, m->mask);
        }

        return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
        unsigned long paddr;
        int count = 0;
        u64 node;

        mdesc_for_each_node_by_name(md, node, "mblock")
                count++;
        if (!count)
                return -ENOENT;

        paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
                          SMP_CACHE_BYTES);
        if (!paddr)
                return -ENOMEM;

        mblocks = __va(paddr);
        num_mblocks = count;

        count = 0;
        mdesc_for_each_node_by_name(md, node, "mblock") {
                struct mdesc_mblock *m = &mblocks[count++];
                const u64 *val;

                val = mdesc_get_property(md, node, "base", NULL);
                m->base = *val;
                val = mdesc_get_property(md, node, "size", NULL);
                m->size = *val;
                val = mdesc_get_property(md, node,
                                         "address-congruence-offset", NULL);

                /* The address-congruence-offset property is optional.
                 * Explicitly zero the offset to identify that case.
                 */
                if (val)
                        m->offset = *val;
                else
                        m->offset = 0UL;

                numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
                        count - 1, m->base, m->size, m->offset);
        }

        return 0;
}

static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
                                               u64 grp, cpumask_t *mask)
{
        u64 arc;

        cpumask_clear(mask);

        mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
                u64 target = mdesc_arc_target(md, arc);
                const char *name = mdesc_node_name(md, target);
                const u64 *id;

                if (strcmp(name, "cpu"))
                        continue;
                id = mdesc_get_property(md, target, "id", NULL);
                if (*id < nr_cpu_ids)
                        cpumask_set_cpu(*id, mask);
        }
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
        int i;

        for (i = 0; i < num_mlgroups; i++) {
                struct mdesc_mlgroup *m = &mlgroups[i];
                if (m->node == node)
                        return m;
        }
        return NULL;
}

static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
                                      int index)
{
        struct mdesc_mlgroup *candidate = NULL;
        u64 arc, best_latency = ~(u64)0;
        struct node_mem_mask *n;

        mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
                u64 target = mdesc_arc_target(md, arc);
                struct mdesc_mlgroup *m = find_mlgroup(target);
                if (!m)
                        continue;
                if (m->latency < best_latency) {
                        candidate = m;
                        best_latency = m->latency;
                }
        }
        if (!candidate)
                return -ENOENT;

        if (num_node_masks != index) {
                printk(KERN_ERR "Inconsistent NUMA state, "
                       "index[%d] != num_node_masks[%d]\n",
                       index, num_node_masks);
                return -EINVAL;
        }

        n = &node_masks[num_node_masks++];

        n->mask = candidate->mask;
        n->val = candidate->match;

        numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
                index, n->mask, n->val, candidate->latency);

        return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
                                         int index)
{
        cpumask_t mask;
        int cpu;

        numa_parse_mdesc_group_cpus(md, grp, &mask);

        for_each_cpu(cpu, &mask)
                numa_cpu_lookup_table[cpu] = index;
        cpumask_copy(&numa_cpumask_lookup_table[index], &mask);

        if (numa_debug) {
                printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
                for_each_cpu(cpu, &mask)
                        printk("%d ", cpu);
                printk("]\n");
        }

        return numa_attach_mlgroup(md, grp, index);
}

static int __init numa_parse_mdesc(void)
{
        struct mdesc_handle *md = mdesc_grab();
        int i, err, count;
        u64 node;

        node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
        if (node == MDESC_NODE_NULL) {
                mdesc_release(md);
                return -ENOENT;
        }

        err = grab_mblocks(md);
        if (err < 0)
                goto out;

        err = grab_mlgroups(md);
        if (err < 0)
                goto out;

        count = 0;
        mdesc_for_each_node_by_name(md, node, "group") {
                err = numa_parse_mdesc_group(md, node, count);
                if (err < 0)
                        break;
                count++;
        }

        add_node_ranges();

        for (i = 0; i < num_node_masks; i++) {
                allocate_node_data(i);
                node_set_online(i);
        }

        err = 0;
out:
        mdesc_release(md);
        return err;
}

static int __init numa_parse_jbus(void)
{
        unsigned long cpu, index;

        /* NUMA node id is encoded in bits 36 and higher, and there is
         * a 1-to-1 mapping from CPU ID to NUMA node ID.
         */
        index = 0;
        for_each_present_cpu(cpu) {
                numa_cpu_lookup_table[cpu] = index;
                cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
                node_masks[index].mask = ~((1UL << 36UL) - 1UL);
                node_masks[index].val = cpu << 36UL;

                index++;
        }
        num_node_masks = index;

        add_node_ranges();

        for (index = 0; index < num_node_masks; index++) {
                allocate_node_data(index);
                node_set_online(index);
        }

        return 0;
}

static int __init numa_parse_sun4u(void)
{
        if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                unsigned long ver;

                __asm__ ("rdpr %%ver, %0" : "=r" (ver));
                if ((ver >> 32UL) == __JALAPENO_ID ||
                    (ver >> 32UL) == __SERRANO_ID)
                        return numa_parse_jbus();
        }
        return -1;
}

static int __init bootmem_init_numa(void)
{
        int err = -1;

        numadbg("bootmem_init_numa()\n");

        if (numa_enabled) {
                if (tlb_type == hypervisor)
                        err = numa_parse_mdesc();
                else
                        err = numa_parse_sun4u();
        }
        return err;
}

#else

static int bootmem_init_numa(void)
{
        return -1;
}

#endif

static void __init bootmem_init_nonnuma(void)
{
        unsigned long top_of_ram = memblock_end_of_DRAM();
        unsigned long total_ram = memblock_phys_mem_size();

        numadbg("bootmem_init_nonnuma()\n");

        printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_INFO "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);

        init_node_masks_nonnuma();
        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
        allocate_node_data(0);
        node_set_online(0);
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
        unsigned long end_pfn;

        end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn = end_pfn;
        min_low_pfn = (phys_base >> PAGE_SHIFT);

        if (bootmem_init_numa() < 0)
                bootmem_init_nonnuma();

        /* Dump memblock with node info. */
        memblock_dump_all();

        /* XXX cpu notifier XXX */

        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();

        return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
                                            unsigned long pend, pgprot_t prot)
{
        unsigned long vstart = PAGE_OFFSET + pstart;
        unsigned long vend = PAGE_OFFSET + pend;
        unsigned long alloc_bytes = 0UL;

        if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
                prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
                            vstart, vend);
                prom_halt();
        }

        while (vstart < vend) {
                unsigned long this_end, paddr = __pa(vstart);
                pgd_t *pgd = pgd_offset_k(vstart);
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pud = pud_offset(pgd, vstart);
                if (pud_none(*pud)) {
                        pmd_t *new;

                        new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
                        alloc_bytes += PAGE_SIZE;
                        pud_populate(&init_mm, pud, new);
                }

                pmd = pmd_offset(pud, vstart);
                if (!pmd_present(*pmd)) {
                        pte_t *new;

                        new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
                        alloc_bytes += PAGE_SIZE;
                        pmd_populate_kernel(&init_mm, pmd, new);
                }

                pte = pte_offset_kernel(pmd, vstart);
                this_end = (vstart + PMD_SIZE) & PMD_MASK;
                if (this_end > vend)
                        this_end = vend;

                while (vstart < this_end) {
                        pte_val(*pte) = (paddr | pgprot_val(prot));

                        vstart += PAGE_SIZE;
                        paddr += PAGE_SIZE;
                        pte++;
                }
        }

        return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */

static void __init kpte_set_val(unsigned long index, unsigned long val)
{
        unsigned long *ptr = kpte_linear_bitmap;

        val <<= ((index % (BITS_PER_LONG / 2)) * 2);
        ptr += (index / (BITS_PER_LONG / 2));

        *ptr |= val;
}

static const unsigned long kpte_shift_min = 28; /* 256MB */
static const unsigned long kpte_shift_max = 34; /* 16GB */
static const unsigned long kpte_shift_incr = 3;

static unsigned long kpte_mark_using_shift(unsigned long start, unsigned long end,
                                           unsigned long shift)
{
        unsigned long size = (1UL << shift);
        unsigned long mask = (size - 1UL);
        unsigned long remains = end - start;
        unsigned long val;

        if (remains < size || (start & mask))
                return start;

        /* VAL maps:
         *
         *      shift 28 --> kern_linear_pte_xor index 1
         *      shift 31 --> kern_linear_pte_xor index 2
         *      shift 34 --> kern_linear_pte_xor index 3
         */
        val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1;

        remains &= ~mask;
        if (shift != kpte_shift_max)
                remains = size;

        while (remains) {
                unsigned long index = start >> kpte_shift_min;

                kpte_set_val(index, val);

                start += 1UL << kpte_shift_min;
                remains -= 1UL << kpte_shift_min;
        }

        return start;
}
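
/* Illustration (not in the original source): marking [2GB, 4GB) with
 * shift 31 (2GB): the start is 2GB-aligned and the region holds one
 * full 2GB page, so val == ((31 - 28) / 3) + 1 == 2 is written into
 * eight consecutive 256MB bitmap slots and 4GB is returned.  TLB
 * misses in that range then pick up kern_linear_pte_xor[2].
 */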

static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
        unsigned long smallest_size, smallest_mask;
        unsigned long s;

        smallest_size = (1UL << kpte_shift_min);
        smallest_mask = (smallest_size - 1UL);

        while (start < end) {
                unsigned long orig_start = start;

                for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) {
                        start = kpte_mark_using_shift(start, end, s);

                        if (start != orig_start)
                                break;
                }

                if (start == orig_start)
                        start = (start + smallest_size) & ~smallest_mask;
        }
}

static void __init init_kpte_bitmap(void)
{
        unsigned long i;

        for (i = 0; i < pall_ents; i++) {
                unsigned long phys_start, phys_end;

                phys_start = pall[i].phys_addr;
                phys_end = phys_start + pall[i].reg_size;

                mark_kpte_bitmap(phys_start, phys_end);
        }
}

static void __init kernel_physical_mapping_init(void)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
        unsigned long i, mem_alloced = 0UL;

        for (i = 0; i < pall_ents; i++) {
                unsigned long phys_start, phys_end;

                phys_start = pall[i].phys_addr;
                phys_end = phys_start + pall[i].reg_size;

                mem_alloced += kernel_map_range(phys_start, phys_end,
                                                PAGE_KERNEL);
        }

        printk("Allocated %ld bytes for kernel page tables.\n",
               mem_alloced);

        kvmap_linear_patch[0] = 0x01000000; /* nop */
        flushi(&kvmap_linear_patch[0]);

        __flush_tlb_all();
#endif
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
        unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

        kernel_map_range(phys_start, phys_end,
                         (enable ? PAGE_KERNEL : __pgprot(0)));

        flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
                               PAGE_OFFSET + phys_end);

        /* We should perform an IPI and flush all TLBs, but that can
         * deadlock, so we only flush the current cpu's TLB here.
         */
        __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
                                 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
        int i;

        for (i = 0; i < pavail_ents; i++) {
                if (pavail[i].reg_size >= size)
                        return pavail[i].phys_addr;
        }

        return ~0UL;
}

unsigned long PAGE_OFFSET;
EXPORT_SYMBOL(PAGE_OFFSET);

static void __init page_offset_shift_patch_one(unsigned int *insn, unsigned long phys_bits)
{
        unsigned long final_shift;
        unsigned int val = *insn;
        unsigned int cnt;

        /* We are patching in ilog2(max_supported_phys_address), and
         * we are doing so in a manner similar to a relocation addend.
         * That is, we are adding the shift value to whatever value
         * is in the shift instruction count field already.
         */
        cnt = (val & 0x3f);
        val &= ~0x3f;

        /* If we are trying to shift >= 64 bits, clear the destination
         * register.  This can happen when phys_bits ends up being equal
         * to MAX_PHYS_ADDRESS_BITS.
         */
        final_shift = (cnt + (64 - phys_bits));
        if (final_shift >= 64) {
                unsigned int rd = (val >> 25) & 0x1f;

                val = 0x80100000 | (rd << 25);
        } else {
                val |= final_shift;
        }
        *insn = val;

        __asm__ __volatile__("flush     %0"
                             : /* no outputs */
                             : "r" (insn));
}
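
/* Illustration (not in the original source): with phys_bits == 47, a
 * patched shift instruction whose count field holds 3 ends up shifting
 * by 3 + (64 - 47) == 20 bits.  The 0x80100000 encoding used above is
 * "or %g0, %g0, %rd", i.e. clr %rd.
 */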
1596
1597static void __init page_offset_shift_patch(unsigned long phys_bits)
1598{
1599        extern unsigned int __page_offset_shift_patch;
1600        extern unsigned int __page_offset_shift_patch_end;
1601        unsigned int *p;
1602
1603        p = &__page_offset_shift_patch;
1604        while (p < &__page_offset_shift_patch_end) {
1605                unsigned int *insn = (unsigned int *)(unsigned long)*p;
1606
1607                page_offset_shift_patch_one(insn, phys_bits);
1608
1609                p++;
1610        }
1611}
1612
1613static void __init setup_page_offset(void)
1614{
1615        unsigned long max_phys_bits = 40;
1616
1617        if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1618                max_phys_bits = 42;
1619        } else if (tlb_type == hypervisor) {
1620                switch (sun4v_chip_type) {
1621                case SUN4V_CHIP_NIAGARA1:
1622                case SUN4V_CHIP_NIAGARA2:
1623                        max_phys_bits = 39;
1624                        break;
1625                case SUN4V_CHIP_NIAGARA3:
1626                        max_phys_bits = 43;
1627                        break;
1628                case SUN4V_CHIP_NIAGARA4:
1629                case SUN4V_CHIP_NIAGARA5:
1630                case SUN4V_CHIP_SPARC64X:
1631                default:
1632                        max_phys_bits = 47;
1633                        break;
1634                }
1635        }
1636
1637        if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
1638                prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
1639                            max_phys_bits);
1640                prom_halt();
1641        }
1642
1643        PAGE_OFFSET = PAGE_OFFSET_BY_BITS(max_phys_bits);
1644
1645        pr_info("PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
1646                PAGE_OFFSET, max_phys_bits);
1647
1648        page_offset_shift_patch(max_phys_bits);
1649}
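    /* Worked example, assuming PAGE_OFFSET_BY_BITS(X) expands to
     * -(1UL << (X)): a Niagara-4 class cpu reports max_phys_bits == 47
     * above, so PAGE_OFFSET becomes 0xffff800000000000 and the linear
     * mapping occupies the top 2^47 bytes of the virtual address
     * space.
     */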
1650
1651static void __init tsb_phys_patch(void)
1652{
1653        struct tsb_ldquad_phys_patch_entry *pquad;
1654        struct tsb_phys_patch_entry *p;
1655
1656        pquad = &__tsb_ldquad_phys_patch;
1657        while (pquad < &__tsb_ldquad_phys_patch_end) {
1658                unsigned long addr = pquad->addr;
1659
1660                if (tlb_type == hypervisor)
1661                        *(unsigned int *) addr = pquad->sun4v_insn;
1662                else
1663                        *(unsigned int *) addr = pquad->sun4u_insn;
1664                wmb();
1665                __asm__ __volatile__("flush     %0"
1666                                     : /* no outputs */
1667                                     : "r" (addr));
1668
1669                pquad++;
1670        }
1671
1672        p = &__tsb_phys_patch;
1673        while (p < &__tsb_phys_patch_end) {
1674                unsigned long addr = p->addr;
1675
1676                *(unsigned int *) addr = p->insn;
1677                wmb();
1678                __asm__ __volatile__("flush     %0"
1679                                     : /* no outputs */
1680                                     : "r" (addr));
1681
1682                p++;
1683        }
1684}
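    /* This is self-modifying code: wmb() orders the instruction store
     * before the "flush", and "flush" makes the local cpu refetch the
     * modified word instead of executing a stale I-cache copy.
     */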
1685
1686/* Not marked __init: we hand this data to the Hypervisor.  */
1687#ifndef CONFIG_DEBUG_PAGEALLOC
1688#define NUM_KTSB_DESCR  2
1689#else
1690#define NUM_KTSB_DESCR  1
1691#endif
1692static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
1693extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
1694
1695static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
1696{
1697        pa >>= KTSB_PHYS_SHIFT;
1698
1699        while (start < end) {
1700                unsigned int *ia = (unsigned int *)(unsigned long)*start;
1701
1702                ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
1703                __asm__ __volatile__("flush     %0" : : "r" (ia));
1704
1705                ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
1706                __asm__ __volatile__("flush     %0" : : "r" (ia + 1));
1707
1708                start++;
1709        }
1710}
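    /* Each patch site is a sethi/or pair materializing
     * pa >> KTSB_PHYS_SHIFT as an immediate: ia[0] keeps its opcode
     * bits and receives the upper 22 bits in sethi's imm22 field,
     * while ia[1] receives the remaining low 10 bits in its immediate
     * field.
     */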
1711
1712static void ktsb_phys_patch(void)
1713{
1714        extern unsigned int __swapper_tsb_phys_patch;
1715        extern unsigned int __swapper_tsb_phys_patch_end;
1716        unsigned long ktsb_pa;
1717
1718        ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1719        patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
1720                            &__swapper_tsb_phys_patch_end, ktsb_pa);
1721#ifndef CONFIG_DEBUG_PAGEALLOC
1722        {
1723        extern unsigned int __swapper_4m_tsb_phys_patch;
1724        extern unsigned int __swapper_4m_tsb_phys_patch_end;
1725        ktsb_pa = (kern_base +
1726                   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1727        patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
1728                            &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
1729        }
1730#endif
1731}
1732
1733static void __init sun4v_ktsb_init(void)
1734{
1735        unsigned long ktsb_pa;
1736
1737        /* First KTSB for PAGE_SIZE mappings.  */
1738        ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1739
1740        switch (PAGE_SIZE) {
1741        case 8 * 1024:
1742        default:
1743                ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
1744                ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
1745                break;
1746
1747        case 64 * 1024:
1748                ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
1749                ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
1750                break;
1751
1752        case 512 * 1024:
1753                ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
1754                ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
1755                break;
1756
1757        case 4 * 1024 * 1024:
1758                ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
1759                ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
1760                break;
1761        }
1762
1763        ktsb_descr[0].assoc = 1;
1764        ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
1765        ktsb_descr[0].ctx_idx = 0;
1766        ktsb_descr[0].tsb_base = ktsb_pa;
1767        ktsb_descr[0].resv = 0;
1768
1769#ifndef CONFIG_DEBUG_PAGEALLOC
1770        /* Second KTSB for 4MB/256MB/2GB/16GB mappings.  */
1771        ktsb_pa = (kern_base +
1772                   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1773
1774        ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
1775        ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
1776                                    HV_PGSZ_MASK_256MB |
1777                                    HV_PGSZ_MASK_2GB |
1778                                    HV_PGSZ_MASK_16GB) &
1779                                   cpu_pgsz_mask);
1780        ktsb_descr[1].assoc = 1;
1781        ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
1782        ktsb_descr[1].ctx_idx = 0;
1783        ktsb_descr[1].tsb_base = ktsb_pa;
1784        ktsb_descr[1].resv = 0;
1785#endif
1786}
1787
1788void sun4v_ktsb_register(void)
1789{
1790        unsigned long pa, ret;
1791
1792        pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
1793
1794        ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
1795        if (ret != 0) {
1796                prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
1797                            "errors with %lx\n", pa, ret);
1798                prom_halt();
1799        }
1800}
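    /* sun4v_mmu_tsb_ctx0() is a thin wrapper around the
     * HV_FAST_MMU_TSB_CTX0 hypervisor call: it hands the hypervisor
     * the real address of the ktsb_descr[] array, after which
     * context-zero (kernel) TSB misses are serviced from the TSBs
     * described there.
     */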
1801
1802static void __init sun4u_linear_pte_xor_finalize(void)
1803{
1804#ifndef CONFIG_DEBUG_PAGEALLOC
1805        /* This is where we would add Panther support for
1806         * 32MB and 256MB pages.
1807         */
1808#endif
1809}
1810
1811static void __init sun4v_linear_pte_xor_finalize(void)
1812{
1813#ifndef CONFIG_DEBUG_PAGEALLOC
1814        if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
1815                kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
1816                        PAGE_OFFSET;
1817                kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1818                                           _PAGE_P_4V | _PAGE_W_4V);
1819        } else {
1820                kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
1821        }
1822
1823        if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
1824                kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
1825                        PAGE_OFFSET;
1826                kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1827                                           _PAGE_P_4V | _PAGE_W_4V);
1828        } else {
1829                kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
1830        }
1831
1832        if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
1833                kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
1834                        PAGE_OFFSET;
1835                kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1836                                           _PAGE_P_4V | _PAGE_W_4V);
1837        } else {
1838                kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
1839        }
1840#endif
1841}
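    /* Why one XOR suffices in the miss handler: a linear-area miss
     * address is vaddr == PAGE_OFFSET | paddr (PAGE_OFFSET has no low
     * bits set), so vaddr ^ (tte_bits ^ PAGE_OFFSET) == tte_bits | paddr,
     * provided the physical address bits never overlap the TTE
     * control bits.
     */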
1842
1843/* paging_init() sets up the page tables */
1844
1845static unsigned long last_valid_pfn;
1846pgd_t swapper_pg_dir[PTRS_PER_PGD];
1847
1848static void sun4u_pgprot_init(void);
1849static void sun4v_pgprot_init(void);
1850
1851void __init paging_init(void)
1852{
1853        unsigned long end_pfn, shift, phys_base;
1854        unsigned long real_end, i;
1855        int node;
1856
1857        setup_page_offset();
1858
1859        /* These build time checks make sure that the dcache_dirty_cpu()
1860         * page->flags usage will work.
1861         *
1862         * When a page gets marked as dcache-dirty, we store the
1863         * cpu number starting at bit 32 in the page->flags.  Also,
1864         * functions like clear_dcache_dirty_cpu use the cpu mask
1865         * in 13-bit signed-immediate instruction fields.
1866         */
1867
1868        /*
1869         * Page flags must not reach into the upper 32 bits, which
1870         * are used for the cpu number.
1871         */
1872        BUILD_BUG_ON(NR_PAGEFLAGS > 32);
1873
1874        /*
1875         * The bit fields placed in the high range must not reach below
1876         * the 32 bit boundary. Otherwise we cannot place the cpu field
1877         * at the 32 bit boundary.
1878         */
1879        BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
1880                ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
1881
1882        BUILD_BUG_ON(NR_CPUS > 4096);
1883
1884        kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
1885        kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
1886
1887        /* Invalidate both kernel TSBs.  */
1888        memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
1889#ifndef CONFIG_DEBUG_PAGEALLOC
1890        memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
1891#endif
1892
1893        if (tlb_type == hypervisor)
1894                sun4v_pgprot_init();
1895        else
1896                sun4u_pgprot_init();
1897
1898        if (tlb_type == cheetah_plus ||
1899            tlb_type == hypervisor) {
1900                tsb_phys_patch();
1901                ktsb_phys_patch();
1902        }
1903
1904        if (tlb_type == hypervisor)
1905                sun4v_patch_tlb_handlers();
1906
1907        /* Find available physical memory...
1908         *
1909         * Read it twice in order to work around a bug in openfirmware.
1910         * The call to grab this table itself can cause openfirmware to
1911         * allocate memory, which in turn can take away some space from
1912         * the list of available memory.  Reading it twice makes sure
1913         * we really do get the final value.
1914         */
1915        read_obp_translations();
1916        read_obp_memory("reg", &pall[0], &pall_ents);
1917        read_obp_memory("available", &pavail[0], &pavail_ents);
1918        read_obp_memory("available", &pavail[0], &pavail_ents);
1919
1920        phys_base = 0xffffffffffffffffUL;
1921        for (i = 0; i < pavail_ents; i++) {
1922                phys_base = min(phys_base, pavail[i].phys_addr);
1923                memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
1924        }
1925
1926        memblock_reserve(kern_base, kern_size);
1927
1928        find_ramdisk(phys_base);
1929
1930        memblock_enforce_memory_limit(cmdline_memory_size);
1931
1932        memblock_allow_resize();
1933        memblock_dump_all();
1934
1935        set_bit(0, mmu_context_bmap);
1936
1937        shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
1938
1939        real_end = (unsigned long)_end;
1940        num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
1941        printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
1942               num_kernel_image_mappings);
1943
1944        /* Set kernel pgd to upper alias so physical page computations
1945         * work.
1946         */
1947        init_mm.pgd += ((shift) / (sizeof(pgd_t)));
1948
1949        memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
1950
1951        /* Now we can initialize the kernel/bad page tables. */
1952        pud_set(pud_offset(&swapper_pg_dir[0], 0),
1953                swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
1954
1955        inherit_prom_mappings();
1956
1957        init_kpte_bitmap();
1958
1959        /* Ok, we can use our TLB miss and window trap handlers safely.  */
1960        setup_tba();
1961
1962        __flush_tlb_all();
1963
1964        prom_build_devicetree();
1965        of_populate_present_mask();
1966#ifndef CONFIG_SMP
1967        of_fill_in_cpu_data();
1968#endif
1969
1970        if (tlb_type == hypervisor) {
1971                sun4v_mdesc_init();
1972                mdesc_populate_present_mask(cpu_all_mask);
1973#ifndef CONFIG_SMP
1974                mdesc_fill_in_cpu_data(cpu_all_mask);
1975#endif
1976                mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
1977
1978                sun4v_linear_pte_xor_finalize();
1979
1980                sun4v_ktsb_init();
1981                sun4v_ktsb_register();
1982        } else {
1983                unsigned long impl, ver;
1984
1985                cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
1986                                 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
1987
1988                __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
1989                impl = ((ver >> 32) & 0xffff);
1990                if (impl == PANTHER_IMPL)
1991                        cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
1992                                          HV_PGSZ_MASK_256MB);
1993
1994                sun4u_linear_pte_xor_finalize();
1995        }
1996
1997        /* Flush the TLBs and the 4M TSB so that the updated linear
1998         * pte XOR settings are realized for all mappings.
1999         */
2000        __flush_tlb_all();
2001#ifndef CONFIG_DEBUG_PAGEALLOC
2002        memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2003#endif
2004        __flush_tlb_all();
2005
2006        /* Setup bootmem... */
2007        last_valid_pfn = end_pfn = bootmem_init(phys_base);
2008
2009        /* Once the OF device tree and MDESC have been setup, we know
2010         * the list of possible cpus.  Therefore we can allocate the
2011         * IRQ stacks.
2012         */
2013        for_each_possible_cpu(i) {
2014                node = cpu_to_node(i);
2015
2016                softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
2017                                                        THREAD_SIZE,
2018                                                        THREAD_SIZE, 0);
2019                hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
2020                                                        THREAD_SIZE,
2021                                                        THREAD_SIZE, 0);
2022        }
2023
2024        kernel_physical_mapping_init();
2025
2026        {
2027                unsigned long max_zone_pfns[MAX_NR_ZONES];
2028
2029                memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
2030
2031                max_zone_pfns[ZONE_NORMAL] = end_pfn;
2032
2033                free_area_init_nodes(max_zone_pfns);
2034        }
2035
2036        printk("Booting Linux...\n");
2037}
2038
2039int page_in_phys_avail(unsigned long paddr)
2040{
2041        int i;
2042
2043        paddr &= PAGE_MASK;
2044
2045        for (i = 0; i < pavail_ents; i++) {
2046                unsigned long start, end;
2047
2048                start = pavail[i].phys_addr;
2049                end = start + pavail[i].reg_size;
2050
2051                if (paddr >= start && paddr < end)
2052                        return 1;
2053        }
2054        if (paddr >= kern_base && paddr < (kern_base + kern_size))
2055                return 1;
2056#ifdef CONFIG_BLK_DEV_INITRD
2057        if (paddr >= __pa(initrd_start) &&
2058            paddr < __pa(PAGE_ALIGN(initrd_end)))
2059                return 1;
2060#endif
2061
2062        return 0;
2063}
2064
2065static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
2066static int pavail_rescan_ents __initdata;
2067
2068/* Certain OBP calls, such as fetching "available" properties, can
2069 * claim physical memory.  So, along with initializing the valid
2070 * address bitmap, we refetch the available physical memory list
2071 * here and make sure it provides at least as much memory as
2072 * 'pavail' does.
2073 */
2074static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
2075{
2076        int i;
2077
2078        read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
2079
2080        for (i = 0; i < pavail_ents; i++) {
2081                unsigned long old_start, old_end;
2082
2083                old_start = pavail[i].phys_addr;
2084                old_end = old_start + pavail[i].reg_size;
2085                while (old_start < old_end) {
2086                        int n;
2087
2088                        for (n = 0; n < pavail_rescan_ents; n++) {
2089                                unsigned long new_start, new_end;
2090
2091                                new_start = pavail_rescan[n].phys_addr;
2092                                new_end = new_start +
2093                                        pavail_rescan[n].reg_size;
2094
2095                                if (new_start <= old_start &&
2096                                    new_end >= (old_start + PAGE_SIZE)) {
2097                                        set_bit(old_start >> ILOG2_4MB, bitmap);
2098                                        goto do_next_page;
2099                                }
2100                        }
2101
2102                        prom_printf("mem_init: Lost memory in pavail\n");
2103                        prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
2104                                    pavail[i].phys_addr,
2105                                    pavail[i].reg_size);
2106                        prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
2107                                    pavail_rescan[i].phys_addr,
2108                                    pavail_rescan[i].reg_size);
2109                        prom_printf("mem_init: Cannot continue, aborting.\n");
2110                        prom_halt();
2111
2112                do_next_page:
2113                        old_start += PAGE_SIZE;
2114                }
2115        }
2116}
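    /* Note the granularity mismatch above: one bitmap bit covers 4MB
     * (the ILOG2_4MB shift), but the scan advances in PAGE_SIZE steps
     * so that even a single page claimed by OBP inside a chunk is
     * caught and triggers the prom_halt() path rather than silently
     * marking the whole chunk valid.
     */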
2117
2118static void __init patch_tlb_miss_handler_bitmap(void)
2119{
2120        extern unsigned int valid_addr_bitmap_insn[];
2121        extern unsigned int valid_addr_bitmap_patch[];
2122
2123        valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
2124        mb();
2125        valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
2126        flushi(&valid_addr_bitmap_insn[0]);
2127}
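    /* The two-word patch is applied back to front: word 1 is stored
     * first and mb() orders it before word 0, so by the time the new
     * first instruction can be fetched its successor is already in
     * place; flushi() then evicts the stale words from the I-cache.
     */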
2128
2129static void __init register_page_bootmem_info(void)
2130{
2131#ifdef CONFIG_NEED_MULTIPLE_NODES
2132        int i;
2133
2134        for_each_online_node(i)
2135                if (NODE_DATA(i)->node_spanned_pages)
2136                        register_page_bootmem_info_node(NODE_DATA(i));
2137#endif
2138}
2139void __init mem_init(void)
2140{
2141        unsigned long addr, last;
2142
2143        addr = PAGE_OFFSET + kern_base;
2144        last = PAGE_ALIGN(kern_size) + addr;
2145        while (addr < last) {
2146                set_bit(__pa(addr) >> ILOG2_4MB, sparc64_valid_addr_bitmap);
2147                addr += PAGE_SIZE;
2148        }
2149
2150        setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
2151        patch_tlb_miss_handler_bitmap();
2152
2153        high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2154
2155        register_page_bootmem_info();
2156        free_all_bootmem();
2157
2158        /*
2159         * Set up the zero page and mark it reserved, so that its page
2160         * count is not manipulated when it is freed from user ptes.
2161         */
2162        mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2163        if (mem_map_zero == NULL) {
2164                prom_printf("mem_init: Cannot alloc zero page.\n");
2165                prom_halt();
2166        }
2167        mark_page_reserved(mem_map_zero);
2168
2169        mem_init_print_info(NULL);
2170
2171        if (tlb_type == cheetah || tlb_type == cheetah_plus)
2172                cheetah_ecache_flush_init();
2173}
2174
2175void free_initmem(void)
2176{
2177        unsigned long addr, initend;
2178        int do_free = 1;
2179
2180        /* If the physical memory maps were trimmed by kernel command
2181         * line options, don't even try freeing this initmem stuff up.
2182         * The kernel image could have been in the trimmed out region
2183         * and if so the freeing below will free invalid page structs.
2184         */
2185        if (cmdline_memory_size)
2186                do_free = 0;
2187
2188        /*
2189         * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
2190         */
2191        addr = PAGE_ALIGN((unsigned long)(__init_begin));
2192        initend = (unsigned long)(__init_end) & PAGE_MASK;
2193        for (; addr < initend; addr += PAGE_SIZE) {
2194                unsigned long page;
2195
2196                page = (addr +
2197                        ((unsigned long) __va(kern_base)) -
2198                        ((unsigned long) KERNBASE));
2199                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
2200
2201                if (do_free)
2202                        free_reserved_page(virt_to_page(page));
2203        }
2204}
2205
2206#ifdef CONFIG_BLK_DEV_INITRD
2207void free_initrd_mem(unsigned long start, unsigned long end)
2208{
2209        free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
2210                           "initrd");
2211}
2212#endif
2213
2214#define _PAGE_CACHE_4U  (_PAGE_CP_4U | _PAGE_CV_4U)
2215#define _PAGE_CACHE_4V  (_PAGE_CP_4V | _PAGE_CV_4V)
2216#define __DIRTY_BITS_4U  (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2217#define __DIRTY_BITS_4V  (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2218#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2219#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2220
2221pgprot_t PAGE_KERNEL __read_mostly;
2222EXPORT_SYMBOL(PAGE_KERNEL);
2223
2224pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2225pgprot_t PAGE_COPY __read_mostly;
2226
2227pgprot_t PAGE_SHARED __read_mostly;
2228EXPORT_SYMBOL(PAGE_SHARED);
2229
2230unsigned long pg_iobits __read_mostly;
2231
2232unsigned long _PAGE_IE __read_mostly;
2233EXPORT_SYMBOL(_PAGE_IE);
2234
2235unsigned long _PAGE_E __read_mostly;
2236EXPORT_SYMBOL(_PAGE_E);
2237
2238unsigned long _PAGE_CACHE __read_mostly;
2239EXPORT_SYMBOL(_PAGE_CACHE);
2240
2241#ifdef CONFIG_SPARSEMEM_VMEMMAP
2242unsigned long vmemmap_table[VMEMMAP_SIZE];
2243
2244static long __meminitdata addr_start, addr_end;
2245static int __meminitdata node_start;
2246
2247int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
2248                               int node)
2249{
2250        unsigned long phys_start = (vstart - VMEMMAP_BASE);
2251        unsigned long phys_end = (vend - VMEMMAP_BASE);
2252        unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
2253        unsigned long end = VMEMMAP_ALIGN(phys_end);
2254        unsigned long pte_base;
2255
2256        pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2257                    _PAGE_CP_4U | _PAGE_CV_4U |
2258                    _PAGE_P_4U | _PAGE_W_4U);
2259        if (tlb_type == hypervisor)
2260                pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2261                            _PAGE_CP_4V | _PAGE_CV_4V |
2262                            _PAGE_P_4V | _PAGE_W_4V);
2263
2264        for (; addr < end; addr += VMEMMAP_CHUNK) {
2265                unsigned long *vmem_pp =
2266                        vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
2267                void *block;
2268
2269                if (!(*vmem_pp & _PAGE_VALID)) {
2270                        block = vmemmap_alloc_block(1UL << ILOG2_4MB, node);
2271                        if (!block)
2272                                return -ENOMEM;
2273
2274                        *vmem_pp = pte_base | __pa(block);
2275
2276                        /* check to see if we have contiguous blocks */
2277                        if (addr_end != addr || node_start != node) {
2278                                if (addr_start)
2279                                        printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
2280                                               addr_start, addr_end-1, node_start);
2281                                addr_start = addr;
2282                                node_start = node;
2283                        }
2284                        addr_end = addr + VMEMMAP_CHUNK;
2285                }
2286        }
2287        return 0;
2288}
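    /* vmemmap_table[] is a flat array of ready-made 4MB TTEs, one per
     * VMEMMAP_CHUNK of the virtual memmap, which the vmemmap TLB-miss
     * handler indexes directly with the faulting address.  Rough
     * sizing example (assuming a 64-byte struct page and 8K base
     * pages): one 4MB chunk holds 64K struct pages and so describes
     * 512MB of physical memory.
     */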
2289
2290void __meminit vmemmap_populate_print_last(void)
2291{
2292        if (addr_start) {
2293                printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
2294                       addr_start, addr_end-1, node_start);
2295                addr_start = 0;
2296                addr_end = 0;
2297                node_start = 0;
2298        }
2299}
2300
2301void vmemmap_free(unsigned long start, unsigned long end)
2302{
2303}
2304
2305#endif /* CONFIG_SPARSEMEM_VMEMMAP */
2306
2307static void prot_init_common(unsigned long page_none,
2308                             unsigned long page_shared,
2309                             unsigned long page_copy,
2310                             unsigned long page_readonly,
2311                             unsigned long page_exec_bit)
2312{
2313        PAGE_COPY = __pgprot(page_copy);
2314        PAGE_SHARED = __pgprot(page_shared);
2315
2316        protection_map[0x0] = __pgprot(page_none);
2317        protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2318        protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2319        protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2320        protection_map[0x4] = __pgprot(page_readonly);
2321        protection_map[0x5] = __pgprot(page_readonly);
2322        protection_map[0x6] = __pgprot(page_copy);
2323        protection_map[0x7] = __pgprot(page_copy);
2324        protection_map[0x8] = __pgprot(page_none);
2325        protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2326        protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2327        protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2328        protection_map[0xc] = __pgprot(page_readonly);
2329        protection_map[0xd] = __pgprot(page_readonly);
2330        protection_map[0xe] = __pgprot(page_shared);
2331        protection_map[0xf] = __pgprot(page_shared);
2332}
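    /* protection_map[] is indexed by the low four vm_flags bits:
     * bit 0 VM_READ, bit 1 VM_WRITE, bit 2 VM_EXEC, bit 3 VM_SHARED.
     * Entries without VM_EXEC get page_exec_bit masked off.  Private
     * writable mappings (0x2/0x3) get page_copy, which lacks the
     * hardware write bit so the first store faults for copy-on-write,
     * while shared writable mappings (0xa/0xb, 0xe/0xf) get
     * page_shared.
     */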
2333
2334static void __init sun4u_pgprot_init(void)
2335{
2336        unsigned long page_none, page_shared, page_copy, page_readonly;
2337        unsigned long page_exec_bit;
2338        int i;
2339
2340        PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2341                                _PAGE_CACHE_4U | _PAGE_P_4U |
2342                                __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2343                                _PAGE_EXEC_4U);
2344        PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2345                                       _PAGE_CACHE_4U | _PAGE_P_4U |
2346                                       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2347                                       _PAGE_EXEC_4U | _PAGE_L_4U);
2348
2349        _PAGE_IE = _PAGE_IE_4U;
2350        _PAGE_E = _PAGE_E_4U;
2351        _PAGE_CACHE = _PAGE_CACHE_4U;
2352
2353        pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2354                     __ACCESS_BITS_4U | _PAGE_E_4U);
2355
2356#ifdef CONFIG_DEBUG_PAGEALLOC
2357        kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2358#else
2359        kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
2360                PAGE_OFFSET;
2361#endif
2362        kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2363                                   _PAGE_P_4U | _PAGE_W_4U);
2364
2365        for (i = 1; i < 4; i++)
2366                kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2367
2368        _PAGE_ALL_SZ_BITS =  (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2369                              _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2370                              _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2371
2372
2373        page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2374        page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2375                       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2376        page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2377                       __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2378        page_readonly   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2379                           __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2380
2381        page_exec_bit = _PAGE_EXEC_4U;
2382
2383        prot_init_common(page_none, page_shared, page_copy, page_readonly,
2384                         page_exec_bit);
2385}
2386
2387static void __init sun4v_pgprot_init(void)
2388{
2389        unsigned long page_none, page_shared, page_copy, page_readonly;
2390        unsigned long page_exec_bit;
2391        int i;
2392
2393        PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
2394                                _PAGE_CACHE_4V | _PAGE_P_4V |
2395                                __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2396                                _PAGE_EXEC_4V);
2397        PAGE_KERNEL_LOCKED = PAGE_KERNEL;
2398
2399        _PAGE_IE = _PAGE_IE_4V;
2400        _PAGE_E = _PAGE_E_4V;
2401        _PAGE_CACHE = _PAGE_CACHE_4V;
2402
2403#ifdef CONFIG_DEBUG_PAGEALLOC
2404        kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2405#else
2406        kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
2407                PAGE_OFFSET;
2408#endif
2409        kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
2410                                   _PAGE_P_4V | _PAGE_W_4V);
2411
2412        for (i = 1; i < 4; i++)
2413                kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2414
2415        pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2416                     __ACCESS_BITS_4V | _PAGE_E_4V);
2417
2418        _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2419                             _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2420                             _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2421                             _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2422
2423        page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
2424        page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2425                       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
2426        page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2427                       __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2428        page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2429                         __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2430
2431        page_exec_bit = _PAGE_EXEC_4V;
2432
2433        prot_init_common(page_none, page_shared, page_copy, page_readonly,
2434                         page_exec_bit);
2435}
2436
2437unsigned long pte_sz_bits(unsigned long sz)
2438{
2439        if (tlb_type == hypervisor) {
2440                switch (sz) {
2441                case 8 * 1024:
2442                default:
2443                        return _PAGE_SZ8K_4V;
2444                case 64 * 1024:
2445                        return _PAGE_SZ64K_4V;
2446                case 512 * 1024:
2447                        return _PAGE_SZ512K_4V;
2448                case 4 * 1024 * 1024:
2449                        return _PAGE_SZ4MB_4V;
2450                }
2451        } else {
2452                switch (sz) {
2453                case 8 * 1024:
2454                default:
2455                        return _PAGE_SZ8K_4U;
2456                case 64 * 1024:
2457                        return _PAGE_SZ64K_4U;
2458                case 512 * 1024:
2459                        return _PAGE_SZ512K_4U;
2460                case 4 * 1024 * 1024:
2461                        return _PAGE_SZ4MB_4U;
2462                }
2463        }
2464}
2465
2466pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2467{
2468        pte_t pte;
2469
2470        pte_val(pte)  = page | pgprot_val(pgprot_noncached(prot));
2471        pte_val(pte) |= (((unsigned long)space) << 32);
2472        pte_val(pte) |= pte_sz_bits(page_size);
2473
2474        return pte;
2475}
2476
2477static unsigned long kern_large_tte(unsigned long paddr)
2478{
2479        unsigned long val;
2480
2481        val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2482               _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2483               _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2484        if (tlb_type == hypervisor)
2485                val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2486                       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
2487                       _PAGE_EXEC_4V | _PAGE_W_4V);
2488
2489        return val | paddr;
2490}
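    /* These are the TTEs used for the locked 4MB mappings of the
     * kernel image.  On sun4u the _PAGE_L_4U bit locks the entry into
     * the TLB; the sun4v TTE format has no lock bit, and permanence
     * there comes from the hypervisor's permanent-mapping service
     * (mmu_map_perm_addr) instead.
     */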
2491
2492/* If not locked, zap it. */
2493void __flush_tlb_all(void)
2494{
2495        unsigned long pstate;
2496        int i;
2497
2498        __asm__ __volatile__("flushw\n\t"
2499                             "rdpr      %%pstate, %0\n\t"
2500                             "wrpr      %0, %1, %%pstate"
2501                             : "=r" (pstate)
2502                             : "i" (PSTATE_IE));
2503        if (tlb_type == hypervisor) {
2504                sun4v_mmu_demap_all();
2505        } else if (tlb_type == spitfire) {
2506                for (i = 0; i < 64; i++) {
2507                        /* Spitfire Errata #32 workaround */
2508                        /* NOTE: Always runs on spitfire, so no
2509                         *       cheetah+ page size encodings.
2510                         */
2511                        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
2512                                             "flush     %%g6"
2513                                             : /* No outputs */
2514                                             : "r" (0),
2515                                             "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2516
2517                        if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2518                                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2519                                                     "membar #Sync"
2520                                                     : /* no outputs */
2521                                                     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2522                                spitfire_put_dtlb_data(i, 0x0UL);
2523                        }
2524
2525                        /* Spitfire Errata #32 workaround */
2526                        /* NOTE: Always runs on spitfire, so no
2527                         *       cheetah+ page size encodings.
2528                         */
2529                        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
2530                                             "flush     %%g6"
2531                                             : /* No outputs */
2532                                             : "r" (0),
2533                                             "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2534
2535                        if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2536                                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2537                                                     "membar #Sync"
2538                                                     : /* no outputs */
2539                                                     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2540                                spitfire_put_itlb_data(i, 0x0UL);
2541                        }
2542                }
2543        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2544                cheetah_flush_dtlb_all();
2545                cheetah_flush_itlb_all();
2546        }
2547        __asm__ __volatile__("wrpr      %0, 0, %%pstate"
2548                             : : "r" (pstate));
2549}
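    /* Interrupts are disabled (PSTATE_IE) across the walk so we
     * cannot trap with the TLB half zapped, and on spitfire the loop
     * above deliberately skips entries with the _PAGE_L_4U lock bit
     * so the locked kernel image translations survive the flush.
     */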
2550
2551pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
2552                            unsigned long address)
2553{
2554        struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2555                                       __GFP_REPEAT | __GFP_ZERO);
2556        pte_t *pte = NULL;
2557
2558        if (page)
2559                pte = (pte_t *) page_address(page);
2560
2561        return pte;
2562}
2563
2564pgtable_t pte_alloc_one(struct mm_struct *mm,
2565                        unsigned long address)
2566{
2567        struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2568                                       __GFP_REPEAT | __GFP_ZERO);
2569        if (!page)
2570                return NULL;
2571        if (!pgtable_page_ctor(page)) {
2572                free_hot_cold_page(page, 0);
2573                return NULL;
2574        }
2575        return (pte_t *) page_address(page);
2576}
2577
2578void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2579{
2580        free_page((unsigned long)pte);
2581}
2582
2583static void __pte_free(pgtable_t pte)
2584{
2585        struct page *page = virt_to_page(pte);
2586
2587        pgtable_page_dtor(page);
2588        __free_page(page);
2589}
2590
2591void pte_free(struct mm_struct *mm, pgtable_t pte)
2592{
2593        __pte_free(pte);
2594}
2595
2596void pgtable_free(void *table, bool is_page)
2597{
2598        if (is_page)
2599                __pte_free(table);
2600        else
2601                kmem_cache_free(pgtable_cache, table);
2602}
2603
2604#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2605void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2606                          pmd_t *pmd)
2607{
2608        unsigned long pte, flags;
2609        struct mm_struct *mm;
2610        pmd_t entry = *pmd;
2611
2612        if (!pmd_large(entry) || !pmd_young(entry))
2613                return;
2614
2615        pte = pmd_val(entry);
2616
2617        /* We are fabricating 8MB pages using 4MB real hw pages.  */
2618        pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
2619
2620        mm = vma->vm_mm;
2621
2622        spin_lock_irqsave(&mm->context.lock, flags);
2623
2624        if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
2625                __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
2626                                        addr, pte);
2627
2628        spin_unlock_irqrestore(&mm->context.lock, flags);
2629}
2630#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
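    /* REAL_HPAGE_SHIFT is 22 (4MB), so "addr & (1UL << REAL_HPAGE_SHIFT)"
     * selects which 4MB half of the 8MB huge page faulted; each half
     * gets its own TSB entry and takes its own TLB miss.
     */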
2631
2632#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
2633static void context_reload(void *__data)
2634{
2635        struct mm_struct *mm = __data;
2636
2637        if (mm == current->mm)
2638                load_secondary_context(mm);
2639}
2640
2641void hugetlb_setup(struct pt_regs *regs)
2642{
2643        struct mm_struct *mm = current->mm;
2644        struct tsb_config *tp;
2645
2646        if (in_atomic() || !mm) {
2647                const struct exception_table_entry *entry;
2648
2649                entry = search_exception_tables(regs->tpc);
2650                if (entry) {
2651                        regs->tpc = entry->fixup;
2652                        regs->tnpc = regs->tpc + 4;
2653                        return;
2654                }
2655                pr_alert("Unexpected HugeTLB setup in atomic context.\n");
2656                die_if_kernel("HugeTSB in atomic", regs);
2657        }
2658
2659        tp = &mm->context.tsb_block[MM_TSB_HUGE];
2660        if (likely(tp->tsb == NULL))
2661                tsb_grow(mm, MM_TSB_HUGE, 0);
2662
2663        tsb_context_switch(mm);
2664        smp_tsb_sync(mm);
2665
2666        /* On UltraSPARC-III+ and later, configure the second half of
2667         * the Data-TLB for huge pages.
2668         */
2669        if (tlb_type == cheetah_plus) {
2670                unsigned long ctx;
2671
2672                spin_lock(&ctx_alloc_lock);
2673                ctx = mm->context.sparc64_ctx_val;
2674                ctx &= ~CTX_PGSZ_MASK;
2675                ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
2676                ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
2677
2678                if (ctx != mm->context.sparc64_ctx_val) {
2679                        /* When changing the page size fields, we
2680                         * must perform a context flush so that no
2681                         * stale entries match.  This flush must
2682                         * occur with the original context register
2683                         * settings.
2684                         */
2685                        do_flush_tlb_mm(mm);
2686
2687                        /* Reload the context register of all processors
2688                         * also executing in this address space.
2689                         */
2690                        mm->context.sparc64_ctx_val = ctx;
2691                        on_each_cpu(context_reload, mm, 0);
2692                }
2693                spin_unlock(&ctx_alloc_lock);
2694        }
2695}
2696#endif
2697