linux/arch/sparc/mm/srmmu.c
   1/*
   2 * srmmu.c:  SRMMU specific routines for memory management.
   3 *
   4 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
   5 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
   6 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
   7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
   8 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
   9 */
  10
  11#include <linux/seq_file.h>
  12#include <linux/spinlock.h>
  13#include <linux/bootmem.h>
  14#include <linux/pagemap.h>
  15#include <linux/vmalloc.h>
  16#include <linux/kdebug.h>
  17#include <linux/kernel.h>
  18#include <linux/init.h>
  19#include <linux/log2.h>
  20#include <linux/gfp.h>
  21#include <linux/fs.h>
  22#include <linux/mm.h>
  23
  24#include <asm/mmu_context.h>
  25#include <asm/cacheflush.h>
  26#include <asm/tlbflush.h>
  27#include <asm/io-unit.h>
  28#include <asm/pgalloc.h>
  29#include <asm/pgtable.h>
  30#include <asm/bitext.h>
  31#include <asm/vaddrs.h>
  32#include <asm/cache.h>
  33#include <asm/traps.h>
  34#include <asm/oplib.h>
  35#include <asm/mbus.h>
  36#include <asm/page.h>
  37#include <asm/asi.h>
  38#include <asm/msi.h>
  39#include <asm/smp.h>
  40#include <asm/io.h>
  41
  42/* Now the cpu specific definitions. */
  43#include <asm/turbosparc.h>
  44#include <asm/tsunami.h>
  45#include <asm/viking.h>
  46#include <asm/swift.h>
  47#include <asm/leon.h>
  48#include <asm/mxcc.h>
  49#include <asm/ross.h>
  50
  51#include "srmmu.h"
  52
  53enum mbus_module srmmu_modtype;
  54static unsigned int hwbug_bitmask;
  55int vac_cache_size;
  56int vac_line_size;
  57
  58extern struct resource sparc_iomap;
  59
  60extern unsigned long last_valid_pfn;
  61
  62static pgd_t *srmmu_swapper_pg_dir;
  63
  64const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
  65
  66#ifdef CONFIG_SMP
  67const struct sparc32_cachetlb_ops *local_ops;
  68
  69#define FLUSH_BEGIN(mm)
  70#define FLUSH_END
  71#else
  72#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
  73#define FLUSH_END       }
  74#endif
  75
  76int flush_page_for_dma_global = 1;
  77
  78char *srmmu_name;
  79
  80ctxd_t *srmmu_ctx_table_phys;
  81static ctxd_t *srmmu_context_table;
  82
  83int viking_mxcc_present;
  84static DEFINE_SPINLOCK(srmmu_context_spinlock);
  85
  86static int is_hypersparc;
  87
  88static int srmmu_cache_pagetables;
  89
  90/* these will be initialized in srmmu_nocache_calcsize() */
  91static unsigned long srmmu_nocache_size;
  92static unsigned long srmmu_nocache_end;
  93
  94/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
  95#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
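/*
 * Worked example of the ratio above, assuming the usual sparc32 PAGE_SHIFT
 * of 12: the bitmap shift is 12 - 4 = 8, so one bitmap bit covers
 * 1 << 8 = 256 bytes of pool, i.e. 256 / sizeof(pte_t) = 64 four-byte PTEs.
 */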
  96
  97/* The context table is a nocache user with the biggest alignment needs. */
  98#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)
  99
 100void *srmmu_nocache_pool;
 101void *srmmu_nocache_bitmap;
 102static struct bit_map srmmu_nocache_map;
 103
 104static inline int srmmu_pmd_none(pmd_t pmd)
 105{ return !(pmd_val(pmd) & 0xFFFFFFF); }
 106
 107/* XXX should we hyper_flush_whole_icache here - Anton */
 108static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
 109{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
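/*
 * Both srmmu_ctxd_set() above and pmd_set()/pgd_set() build SRMMU page
 * table descriptors the same way (per the SPARC V8 reference MMU): the
 * lower-level table's physical address is shifted right by 4 into the
 * page table pointer field, and the low two bits hold the entry type
 * (SRMMU_ET_PTD for a pointer, SRMMU_ET_PTE for a leaf mapping).
 */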
 110
 111void pmd_set(pmd_t *pmdp, pte_t *ptep)
 112{
 113        unsigned long ptp;      /* Physical address, shifted right by 4 */
 114        int i;
 115
 116        ptp = __nocache_pa((unsigned long) ptep) >> 4;
 117        for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
 118                set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
 119                ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
 120        }
 121}
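/*
 * A software pmd entry is really a cluster of PTRS_PER_PTE /
 * SRMMU_REAL_PTRS_PER_PTE hardware PTDs: the loop above points them at
 * consecutive 64-entry hardware PTE tables, so one Linux page worth of
 * PTEs is backed by adjacent 256-byte hardware tables.  pmd_populate()
 * below does the same, starting from a struct page instead of a kernel
 * virtual address.
 */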
 122
 123void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
 124{
 125        unsigned long ptp;      /* Physical address, shifted right by 4 */
 126        int i;
 127
 128        ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);      /* watch for overflow */
 129        for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
 130                set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
 131                ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
 132        }
 133}
 134
 135/* Find an entry in the third-level page table.. */
 136pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
 137{
 138        void *pte;
 139
 140        pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
 141        return (pte_t *) pte +
 142            ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
 143}
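/*
 * Only pmdv[0] needs to be read above: pmd_set() keeps the cluster's
 * hardware PTE tables physically consecutive, so indexing from the first
 * table with the full software PTE index reaches the correct entry of
 * whichever 64-entry hardware table actually covers the address.
 */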
 144
 145/*
 146 * size: bytes to allocate in the nocache area.
 147 * align: bytes, number to align at.
 148 * Returns the virtual address of the allocated area.
 149 */
 150static void *__srmmu_get_nocache(int size, int align)
 151{
 152        int offset;
 153        unsigned long addr;
 154
 155        if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
 156                printk(KERN_ERR "Size 0x%x too small for nocache request\n",
 157                       size);
 158                size = SRMMU_NOCACHE_BITMAP_SHIFT;
 159        }
 160        if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {
  161                printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
 162                       size);
 163                size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;
 164        }
 165        BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);
 166
 167        offset = bit_map_string_get(&srmmu_nocache_map,
 168                                    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
 169                                    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
 170        if (offset == -1) {
 171                printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
 172                       size, (int) srmmu_nocache_size,
 173                       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
  174                return NULL;
 175        }
 176
 177        addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
 178        return (void *)addr;
 179}
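/*
 * Typical usage elsewhere in this file: page tables themselves live in
 * the nocache pool, e.g. __srmmu_get_nocache(PTE_SIZE, PTE_SIZE) for a
 * hardware PTE table and __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE,
 * SRMMU_PGD_TABLE_SIZE) for a page directory, so the MMU's table walks do
 * not depend on cached copies on chips that cannot cache page tables safely.
 */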
 180
 181void *srmmu_get_nocache(int size, int align)
 182{
 183        void *tmp;
 184
 185        tmp = __srmmu_get_nocache(size, align);
 186
 187        if (tmp)
 188                memset(tmp, 0, size);
 189
 190        return tmp;
 191}
 192
 193void srmmu_free_nocache(void *addr, int size)
 194{
 195        unsigned long vaddr;
 196        int offset;
 197
 198        vaddr = (unsigned long)addr;
 199        if (vaddr < SRMMU_NOCACHE_VADDR) {
 200                printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
 201                    vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
 202                BUG();
 203        }
 204        if (vaddr + size > srmmu_nocache_end) {
 205                printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
 206                    vaddr, srmmu_nocache_end);
 207                BUG();
 208        }
 209        if (!is_power_of_2(size)) {
 210                printk("Size 0x%x is not a power of 2\n", size);
 211                BUG();
 212        }
 213        if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
 214                printk("Size 0x%x is too small\n", size);
 215                BUG();
 216        }
 217        if (vaddr & (size - 1)) {
 218                printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
 219                BUG();
 220        }
 221
 222        offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
 223        size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;
 224
 225        bit_map_clear(&srmmu_nocache_map, offset, size);
 226}
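/*
 * Example of the conversion above, again assuming PAGE_SHIFT == 12 (bitmap
 * shift 8): freeing a 256-byte PTE table at SRMMU_NOCACHE_VADDR + 0x1000
 * clears a single bit at offset 0x1000 >> 8 == 16 in srmmu_nocache_map.
 */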
 227
 228static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
 229                                                 unsigned long end);
 230
 231/* Return how much physical memory we have.  */
 232static unsigned long __init probe_memory(void)
 233{
 234        unsigned long total = 0;
 235        int i;
 236
 237        for (i = 0; sp_banks[i].num_bytes; i++)
 238                total += sp_banks[i].num_bytes;
 239
 240        return total;
 241}
 242
 243/*
 244 * Reserve nocache dynamically proportionally to the amount of
 245 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 246 */
 247static void __init srmmu_nocache_calcsize(void)
 248{
 249        unsigned long sysmemavail = probe_memory() / 1024;
 250        int srmmu_nocache_npages;
 251
 252        srmmu_nocache_npages =
 253                sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;
 254
 255 /* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
 256        // if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
 257        if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
 258                srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;
 259
 260        /* anything above 1280 blows up */
 261        if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
 262                srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;
 263
 264        srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
 265        srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
 266}
 267
 268static void __init srmmu_nocache_init(void)
 269{
 270        unsigned int bitmap_bits;
 271        pgd_t *pgd;
 272        pmd_t *pmd;
 273        pte_t *pte;
 274        unsigned long paddr, vaddr;
 275        unsigned long pteval;
 276
 277        bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
 278
 279        srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
 280                SRMMU_NOCACHE_ALIGN_MAX, 0UL);
 281        memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
 282
 283        srmmu_nocache_bitmap =
 284                __alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
 285                                SMP_CACHE_BYTES, 0UL);
 286        bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
 287
 288        srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
 289        memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
 290        init_mm.pgd = srmmu_swapper_pg_dir;
 291
 292        srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);
 293
 294        paddr = __pa((unsigned long)srmmu_nocache_pool);
 295        vaddr = SRMMU_NOCACHE_VADDR;
 296
 297        while (vaddr < srmmu_nocache_end) {
 298                pgd = pgd_offset_k(vaddr);
 299                pmd = pmd_offset(__nocache_fix(pgd), vaddr);
 300                pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
 301
 302                pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);
 303
 304                if (srmmu_cache_pagetables)
 305                        pteval |= SRMMU_CACHE;
 306
 307                set_pte(__nocache_fix(pte), __pte(pteval));
 308
 309                vaddr += PAGE_SIZE;
 310                paddr += PAGE_SIZE;
 311        }
 312
 313        flush_cache_all();
 314        flush_tlb_all();
 315}
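/*
 * Each page of the pool is mapped above with a privileged SRMMU_ET_PTE
 * built from the physical address shifted right by 4.  SRMMU_CACHE is
 * only ORed in when srmmu_cache_pagetables is set, which init_viking()
 * does for the Viking/MXCC combination where caching page tables is safe.
 */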
 316
 317pgd_t *get_pgd_fast(void)
 318{
 319        pgd_t *pgd = NULL;
 320
 321        pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
 322        if (pgd) {
 323                pgd_t *init = pgd_offset_k(0);
 324                memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
 325                memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
 326                                                (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
 327        }
 328
 329        return pgd;
 330}
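/*
 * A fresh pgd starts with its user slots cleared and its kernel slots
 * copied from swapper_pg_dir (pgd_offset_k(0)), so every address space
 * shares the kernel mappings set up in srmmu_paging_init().
 */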
 331
 332/*
 333 * Hardware needs alignment to 256 only, but we align to whole page size
 334 * to reduce fragmentation problems due to the buddy principle.
 335 * XXX Provide actual fragmentation statistics in /proc.
 336 *
 337 * Alignments up to the page size are the same for physical and virtual
 338 * addresses of the nocache area.
 339 */
 340pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 341{
 342        unsigned long pte;
 343        struct page *page;
 344
 345        if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
 346                return NULL;
 347        page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
 348        pgtable_page_ctor(page);
 349        return page;
 350}
 351
 352void pte_free(struct mm_struct *mm, pgtable_t pte)
 353{
 354        unsigned long p;
 355
 356        pgtable_page_dtor(pte);
 357        p = (unsigned long)page_address(pte);   /* Cached address (for test) */
 358        if (p == 0)
 359                BUG();
 360        p = page_to_pfn(pte) << PAGE_SHIFT;     /* Physical address */
 361
  362        /* free non-cached virtual address */
 363        srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
 364}
 365
 366/* context handling - a dynamically sized pool is used */
 367#define NO_CONTEXT      -1
 368
 369struct ctx_list {
 370        struct ctx_list *next;
 371        struct ctx_list *prev;
 372        unsigned int ctx_number;
 373        struct mm_struct *ctx_mm;
 374};
 375
 376static struct ctx_list *ctx_list_pool;
 377static struct ctx_list ctx_free;
 378static struct ctx_list ctx_used;
 379
 380/* At boot time we determine the number of contexts */
 381static int num_contexts;
 382
 383static inline void remove_from_ctx_list(struct ctx_list *entry)
 384{
 385        entry->next->prev = entry->prev;
 386        entry->prev->next = entry->next;
 387}
 388
 389static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
 390{
 391        entry->next = head;
 392        (entry->prev = head->prev)->next = entry;
 393        head->prev = entry;
 394}
 395#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
 396#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
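/*
 * ctx_free and ctx_used are circular doubly linked lists whose heads act
 * as sentinels: an empty list is one whose next pointer still points back
 * at the head, which is exactly the test alloc_context() makes below with
 * ctxp != &ctx_free.
 */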
 397
 398
 399static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
 400{
 401        struct ctx_list *ctxp;
 402
 403        ctxp = ctx_free.next;
 404        if (ctxp != &ctx_free) {
 405                remove_from_ctx_list(ctxp);
 406                add_to_used_ctxlist(ctxp);
 407                mm->context = ctxp->ctx_number;
 408                ctxp->ctx_mm = mm;
 409                return;
 410        }
 411        ctxp = ctx_used.next;
 412        if (ctxp->ctx_mm == old_mm)
 413                ctxp = ctxp->next;
 414        if (ctxp == &ctx_used)
 415                panic("out of mmu contexts");
 416        flush_cache_mm(ctxp->ctx_mm);
 417        flush_tlb_mm(ctxp->ctx_mm);
 418        remove_from_ctx_list(ctxp);
 419        add_to_used_ctxlist(ctxp);
 420        ctxp->ctx_mm->context = NO_CONTEXT;
 421        ctxp->ctx_mm = mm;
 422        mm->context = ctxp->ctx_number;
 423}
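/*
 * When the free list is empty, the code above steals an in-use context
 * from the front of ctx_used (skipping old_mm so the context being
 * switched away from is not recycled), flushes the victim's cache and TLB
 * state, and marks the victim mm as NO_CONTEXT so it will allocate again
 * on its next switch_mm().
 */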
 424
 425static inline void free_context(int context)
 426{
 427        struct ctx_list *ctx_old;
 428
 429        ctx_old = ctx_list_pool + context;
 430        remove_from_ctx_list(ctx_old);
 431        add_to_free_ctxlist(ctx_old);
 432}
 433
 434static void __init sparc_context_init(int numctx)
 435{
 436        int ctx;
 437        unsigned long size;
 438
 439        size = numctx * sizeof(struct ctx_list);
 440        ctx_list_pool = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
 441
 442        for (ctx = 0; ctx < numctx; ctx++) {
 443                struct ctx_list *clist;
 444
 445                clist = (ctx_list_pool + ctx);
 446                clist->ctx_number = ctx;
 447                clist->ctx_mm = NULL;
 448        }
 449        ctx_free.next = ctx_free.prev = &ctx_free;
 450        ctx_used.next = ctx_used.prev = &ctx_used;
 451        for (ctx = 0; ctx < numctx; ctx++)
 452                add_to_free_ctxlist(ctx_list_pool + ctx);
 453}
 454
 455void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
 456               struct task_struct *tsk)
 457{
 458        if (mm->context == NO_CONTEXT) {
 459                spin_lock(&srmmu_context_spinlock);
 460                alloc_context(old_mm, mm);
 461                spin_unlock(&srmmu_context_spinlock);
 462                srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
 463        }
 464
 465        if (sparc_cpu_model == sparc_leon)
 466                leon_switch_mm();
 467
 468        if (is_hypersparc)
 469                hyper_flush_whole_icache();
 470
 471        srmmu_set_context(mm->context);
 472}
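/*
 * Contexts are handed out lazily: an mm keeps NO_CONTEXT until it is
 * first switched to, at which point alloc_context() picks a slot and the
 * matching context table entry is pointed at the mm's pgd before
 * srmmu_set_context() makes it the live context.
 */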
 473
 474/* Low level IO area allocation on the SRMMU. */
 475static inline void srmmu_mapioaddr(unsigned long physaddr,
 476                                   unsigned long virt_addr, int bus_type)
 477{
 478        pgd_t *pgdp;
 479        pmd_t *pmdp;
 480        pte_t *ptep;
 481        unsigned long tmp;
 482
 483        physaddr &= PAGE_MASK;
 484        pgdp = pgd_offset_k(virt_addr);
 485        pmdp = pmd_offset(pgdp, virt_addr);
 486        ptep = pte_offset_kernel(pmdp, virt_addr);
 487        tmp = (physaddr >> 4) | SRMMU_ET_PTE;
 488
 489        /* I need to test whether this is consistent over all
 490         * sun4m's.  The bus_type represents the upper 4 bits of
 491         * 36-bit physical address on the I/O space lines...
 492         */
 493        tmp |= (bus_type << 28);
 494        tmp |= SRMMU_PRIV;
 495        __flush_page_to_ram(virt_addr);
 496        set_pte(ptep, __pte(tmp));
 497}
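/*
 * Illustration of the bus_type handling above (example values only):
 * mapping physaddr 0x10000 with bus_type 0xf yields a pte of
 * (0xf << 28) | (0x10000 >> 4) | SRMMU_ET_PTE | SRMMU_PRIV, i.e. the
 * translation points at 36-bit physical address 0xf00010000 in I/O space.
 */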
 498
 499void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
 500                      unsigned long xva, unsigned int len)
 501{
 502        while (len != 0) {
 503                len -= PAGE_SIZE;
 504                srmmu_mapioaddr(xpa, xva, bus);
 505                xva += PAGE_SIZE;
 506                xpa += PAGE_SIZE;
 507        }
 508        flush_tlb_all();
 509}
 510
 511static inline void srmmu_unmapioaddr(unsigned long virt_addr)
 512{
 513        pgd_t *pgdp;
 514        pmd_t *pmdp;
 515        pte_t *ptep;
 516
 517        pgdp = pgd_offset_k(virt_addr);
 518        pmdp = pmd_offset(pgdp, virt_addr);
 519        ptep = pte_offset_kernel(pmdp, virt_addr);
 520
 521        /* No need to flush uncacheable page. */
 522        __pte_clear(ptep);
 523}
 524
 525void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
 526{
 527        while (len != 0) {
 528                len -= PAGE_SIZE;
 529                srmmu_unmapioaddr(virt_addr);
 530                virt_addr += PAGE_SIZE;
 531        }
 532        flush_tlb_all();
 533}
 534
 535/* tsunami.S */
 536extern void tsunami_flush_cache_all(void);
 537extern void tsunami_flush_cache_mm(struct mm_struct *mm);
 538extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 539extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
 540extern void tsunami_flush_page_to_ram(unsigned long page);
 541extern void tsunami_flush_page_for_dma(unsigned long page);
 542extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
 543extern void tsunami_flush_tlb_all(void);
 544extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
 545extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 546extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 547extern void tsunami_setup_blockops(void);
 548
 549/* swift.S */
 550extern void swift_flush_cache_all(void);
 551extern void swift_flush_cache_mm(struct mm_struct *mm);
 552extern void swift_flush_cache_range(struct vm_area_struct *vma,
 553                                    unsigned long start, unsigned long end);
 554extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
 555extern void swift_flush_page_to_ram(unsigned long page);
 556extern void swift_flush_page_for_dma(unsigned long page);
 557extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
 558extern void swift_flush_tlb_all(void);
 559extern void swift_flush_tlb_mm(struct mm_struct *mm);
 560extern void swift_flush_tlb_range(struct vm_area_struct *vma,
 561                                  unsigned long start, unsigned long end);
 562extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 563
 564#if 0  /* P3: deadwood to debug precise flushes on Swift. */
 565void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 566{
 567        int cctx, ctx1;
 568
 569        page &= PAGE_MASK;
 570        if ((ctx1 = vma->vm_mm->context) != -1) {
 571                cctx = srmmu_get_context();
 572/* Is context # ever different from current context? P3 */
 573                if (cctx != ctx1) {
 574                        printk("flush ctx %02x curr %02x\n", ctx1, cctx);
 575                        srmmu_set_context(ctx1);
 576                        swift_flush_page(page);
 577                        __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 578                                        "r" (page), "i" (ASI_M_FLUSH_PROBE));
 579                        srmmu_set_context(cctx);
 580                } else {
 581                         /* Rm. prot. bits from virt. c. */
 582                        /* swift_flush_cache_all(); */
 583                        /* swift_flush_cache_page(vma, page); */
 584                        swift_flush_page(page);
 585
 586                        __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 587                                "r" (page), "i" (ASI_M_FLUSH_PROBE));
 588                        /* same as above: srmmu_flush_tlb_page() */
 589                }
 590        }
 591}
 592#endif
 593
 594/*
 595 * The following are all MBUS based SRMMU modules, and therefore could
 596 * be found in a multiprocessor configuration.  On the whole, these
  597 * chips seem to be much more touchy about DVMA and page tables
 598 * with respect to cache coherency.
 599 */
 600
 601/* viking.S */
 602extern void viking_flush_cache_all(void);
 603extern void viking_flush_cache_mm(struct mm_struct *mm);
 604extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 605                                     unsigned long end);
 606extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
 607extern void viking_flush_page_to_ram(unsigned long page);
 608extern void viking_flush_page_for_dma(unsigned long page);
 609extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
 610extern void viking_flush_page(unsigned long page);
 611extern void viking_mxcc_flush_page(unsigned long page);
 612extern void viking_flush_tlb_all(void);
 613extern void viking_flush_tlb_mm(struct mm_struct *mm);
 614extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 615                                   unsigned long end);
 616extern void viking_flush_tlb_page(struct vm_area_struct *vma,
 617                                  unsigned long page);
 618extern void sun4dsmp_flush_tlb_all(void);
 619extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
 620extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 621                                   unsigned long end);
 622extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
 623                                  unsigned long page);
 624
 625/* hypersparc.S */
 626extern void hypersparc_flush_cache_all(void);
 627extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
 628extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 629extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
 630extern void hypersparc_flush_page_to_ram(unsigned long page);
 631extern void hypersparc_flush_page_for_dma(unsigned long page);
 632extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
 633extern void hypersparc_flush_tlb_all(void);
 634extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
 635extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 636extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 637extern void hypersparc_setup_blockops(void);
 638
 639/*
 640 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 641 *       kernel mappings are done with one single contiguous chunk of
 642 *       ram.  On small ram machines (classics mainly) we only get
 643 *       around 8mb mapped for us.
 644 */
 645
 646static void __init early_pgtable_allocfail(char *type)
 647{
 648        prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
 649        prom_halt();
 650}
 651
 652static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
 653                                                        unsigned long end)
 654{
 655        pgd_t *pgdp;
 656        pmd_t *pmdp;
 657        pte_t *ptep;
 658
 659        while (start < end) {
 660                pgdp = pgd_offset_k(start);
 661                if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
 662                        pmdp = __srmmu_get_nocache(
 663                            SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
 664                        if (pmdp == NULL)
 665                                early_pgtable_allocfail("pmd");
 666                        memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
 667                        pgd_set(__nocache_fix(pgdp), pmdp);
 668                }
 669                pmdp = pmd_offset(__nocache_fix(pgdp), start);
 670                if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
 671                        ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
 672                        if (ptep == NULL)
 673                                early_pgtable_allocfail("pte");
 674                        memset(__nocache_fix(ptep), 0, PTE_SIZE);
 675                        pmd_set(__nocache_fix(pmdp), ptep);
 676                }
 677                if (start > (0xffffffffUL - PMD_SIZE))
 678                        break;
 679                start = (start + PMD_SIZE) & PMD_MASK;
 680        }
 681}
 682
 683static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
 684                                                  unsigned long end)
 685{
 686        pgd_t *pgdp;
 687        pmd_t *pmdp;
 688        pte_t *ptep;
 689
 690        while (start < end) {
 691                pgdp = pgd_offset_k(start);
 692                if (pgd_none(*pgdp)) {
 693                        pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
 694                        if (pmdp == NULL)
 695                                early_pgtable_allocfail("pmd");
 696                        memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
 697                        pgd_set(pgdp, pmdp);
 698                }
 699                pmdp = pmd_offset(pgdp, start);
 700                if (srmmu_pmd_none(*pmdp)) {
 701                        ptep = __srmmu_get_nocache(PTE_SIZE,
 702                                                             PTE_SIZE);
 703                        if (ptep == NULL)
 704                                early_pgtable_allocfail("pte");
 705                        memset(ptep, 0, PTE_SIZE);
 706                        pmd_set(pmdp, ptep);
 707                }
 708                if (start > (0xffffffffUL - PMD_SIZE))
 709                        break;
 710                start = (start + PMD_SIZE) & PMD_MASK;
 711        }
 712}
 713
 714/* These flush types are not available on all chips... */
 715static inline unsigned long srmmu_probe(unsigned long vaddr)
 716{
 717        unsigned long retval;
 718
 719        if (sparc_cpu_model != sparc_leon) {
 720
 721                vaddr &= PAGE_MASK;
 722                __asm__ __volatile__("lda [%1] %2, %0\n\t" :
 723                                     "=r" (retval) :
 724                                     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
 725        } else {
 726                retval = leon_swprobe(vaddr, 0);
 727        }
 728        return retval;
 729}
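/*
 * The 0x400 ORed into the probe address selects the "probe entire" type
 * in the reference MMU's flush/probe address format (type field in VA
 * bits 11:8), so the load from ASI_M_FLUSH_PROBE returns the descriptor
 * that maps vaddr, or 0 if no translation exists.
 */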
 730
 731/*
 732 * This is much cleaner than poking around physical address space
 733 * looking at the prom's page table directly which is what most
 734 * other OS's do.  Yuck... this is much better.
 735 */
 736static void __init srmmu_inherit_prom_mappings(unsigned long start,
 737                                               unsigned long end)
 738{
 739        unsigned long probed;
 740        unsigned long addr;
 741        pgd_t *pgdp;
 742        pmd_t *pmdp;
 743        pte_t *ptep;
 744        int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
 745
 746        while (start <= end) {
 747                if (start == 0)
 748                        break; /* probably wrap around */
 749                if (start == 0xfef00000)
 750                        start = KADB_DEBUGGER_BEGVM;
 751                probed = srmmu_probe(start);
 752                if (!probed) {
 753                        /* continue probing until we find an entry */
 754                        start += PAGE_SIZE;
 755                        continue;
 756                }
 757
 758                /* A red snapper, see what it really is. */
 759                what = 0;
 760                addr = start - PAGE_SIZE;
 761
 762                if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
 763                        if (srmmu_probe(addr + SRMMU_REAL_PMD_SIZE) == probed)
 764                                what = 1;
 765                }
 766
 767                if (!(start & ~(SRMMU_PGDIR_MASK))) {
 768                        if (srmmu_probe(addr + SRMMU_PGDIR_SIZE) == probed)
 769                                what = 2;
 770                }
 771
 772                pgdp = pgd_offset_k(start);
 773                if (what == 2) {
 774                        *(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
 775                        start += SRMMU_PGDIR_SIZE;
 776                        continue;
 777                }
 778                if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
 779                        pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
 780                                                   SRMMU_PMD_TABLE_SIZE);
 781                        if (pmdp == NULL)
 782                                early_pgtable_allocfail("pmd");
 783                        memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
 784                        pgd_set(__nocache_fix(pgdp), pmdp);
 785                }
 786                pmdp = pmd_offset(__nocache_fix(pgdp), start);
 787                if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
 788                        ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
 789                        if (ptep == NULL)
 790                                early_pgtable_allocfail("pte");
 791                        memset(__nocache_fix(ptep), 0, PTE_SIZE);
 792                        pmd_set(__nocache_fix(pmdp), ptep);
 793                }
 794                if (what == 1) {
 795                        /* We bend the rule where all 16 PTPs in a pmd_t point
 796                         * inside the same PTE page, and we leak a perfectly
 797                         * good hardware PTE piece. Alternatives seem worse.
 798                         */
 799                        unsigned int x; /* Index of HW PMD in soft cluster */
 800                        unsigned long *val;
 801                        x = (start >> PMD_SHIFT) & 15;
 802                        val = &pmdp->pmdv[x];
 803                        *(unsigned long *)__nocache_fix(val) = probed;
 804                        start += SRMMU_REAL_PMD_SIZE;
 805                        continue;
 806                }
 807                ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
 808                *(pte_t *)__nocache_fix(ptep) = __pte(probed);
 809                start += PAGE_SIZE;
 810        }
 811}
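/*
 * Rough summary of the "what" classification above: when start is aligned
 * to a hardware pmd (or pgd) boundary and probing the last page of that
 * same region returns an identical descriptor, the PROM mapped the range
 * with a single level-2 (or level-1) PTE, so it is copied as one large
 * entry instead of being split into individual pages.
 */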
 812
 813#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)
 814
 815/* Create a third-level SRMMU 16MB page mapping. */
 816static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
 817{
 818        pgd_t *pgdp = pgd_offset_k(vaddr);
 819        unsigned long big_pte;
 820
 821        big_pte = KERNEL_PTE(phys_base >> 4);
 822        *(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
 823}
 824
 825/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
 826static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
 827{
 828        unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
 829        unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
 830        unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
 831        /* Map "low" memory only */
 832        const unsigned long min_vaddr = PAGE_OFFSET;
 833        const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;
 834
 835        if (vstart < min_vaddr || vstart >= max_vaddr)
 836                return vstart;
 837
 838        if (vend > max_vaddr || vend < min_vaddr)
 839                vend = max_vaddr;
 840
 841        while (vstart < vend) {
 842                do_large_mapping(vstart, pstart);
 843                vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
 844        }
 845        return vstart;
 846}
 847
 848static void __init map_kernel(void)
 849{
 850        int i;
 851
 852        if (phys_base > 0) {
 853                do_large_mapping(PAGE_OFFSET, phys_base);
 854        }
 855
 856        for (i = 0; sp_banks[i].num_bytes != 0; i++) {
 857                map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
 858        }
 859}
 860
 861void (*poke_srmmu)(void) __cpuinitdata = NULL;
 862
 863extern unsigned long bootmem_init(unsigned long *pages_avail);
 864
 865void __init srmmu_paging_init(void)
 866{
 867        int i;
 868        phandle cpunode;
 869        char node_str[128];
 870        pgd_t *pgd;
 871        pmd_t *pmd;
 872        pte_t *pte;
 873        unsigned long pages_avail;
 874
 875        init_mm.context = (unsigned long) NO_CONTEXT;
 876        sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */
 877
 878        if (sparc_cpu_model == sun4d)
 879                num_contexts = 65536; /* We know it is Viking */
 880        else {
 881                /* Find the number of contexts on the srmmu. */
 882                cpunode = prom_getchild(prom_root_node);
 883                num_contexts = 0;
 884                while (cpunode != 0) {
 885                        prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
 886                        if (!strcmp(node_str, "cpu")) {
 887                                num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
 888                                break;
 889                        }
 890                        cpunode = prom_getsibling(cpunode);
 891                }
 892        }
 893
 894        if (!num_contexts) {
 895                prom_printf("Something wrong, can't find cpu node in paging_init.\n");
 896                prom_halt();
 897        }
 898
 899        pages_avail = 0;
 900        last_valid_pfn = bootmem_init(&pages_avail);
 901
 902        srmmu_nocache_calcsize();
 903        srmmu_nocache_init();
 904        srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
 905        map_kernel();
 906
 907        /* ctx table has to be physically aligned to its size */
 908        srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
 909        srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);
 910
 911        for (i = 0; i < num_contexts; i++)
 912                srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
 913
 914        flush_cache_all();
 915        srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
 916#ifdef CONFIG_SMP
 917        /* Stop from hanging here... */
 918        local_ops->tlb_all();
 919#else
 920        flush_tlb_all();
 921#endif
 922        poke_srmmu();
 923
 924        srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
 925        srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
 926
 927        srmmu_allocate_ptable_skeleton(
 928                __fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
 929        srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);
 930
 931        pgd = pgd_offset_k(PKMAP_BASE);
 932        pmd = pmd_offset(pgd, PKMAP_BASE);
 933        pte = pte_offset_kernel(pmd, PKMAP_BASE);
 934        pkmap_page_table = pte;
 935
 936        flush_cache_all();
 937        flush_tlb_all();
 938
 939        sparc_context_init(num_contexts);
 940
 941        kmap_init();
 942
 943        {
 944                unsigned long zones_size[MAX_NR_ZONES];
 945                unsigned long zholes_size[MAX_NR_ZONES];
 946                unsigned long npages;
 947                int znum;
 948
 949                for (znum = 0; znum < MAX_NR_ZONES; znum++)
 950                        zones_size[znum] = zholes_size[znum] = 0;
 951
 952                npages = max_low_pfn - pfn_base;
 953
 954                zones_size[ZONE_DMA] = npages;
 955                zholes_size[ZONE_DMA] = npages - pages_avail;
 956
 957                npages = highend_pfn - max_low_pfn;
 958                zones_size[ZONE_HIGHMEM] = npages;
 959                zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
 960
 961                free_area_init_node(0, zones_size, pfn_base, zholes_size);
 962        }
 963}
 964
 965void mmu_info(struct seq_file *m)
 966{
 967        seq_printf(m,
 968                   "MMU type\t: %s\n"
 969                   "contexts\t: %d\n"
 970                   "nocache total\t: %ld\n"
 971                   "nocache used\t: %d\n",
 972                   srmmu_name,
 973                   num_contexts,
 974                   srmmu_nocache_size,
 975                   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
 976}
 977
 978int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 979{
 980        mm->context = NO_CONTEXT;
 981        return 0;
 982}
 983
 984void destroy_context(struct mm_struct *mm)
 985{
 986
 987        if (mm->context != NO_CONTEXT) {
 988                flush_cache_mm(mm);
 989                srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
 990                flush_tlb_mm(mm);
 991                spin_lock(&srmmu_context_spinlock);
 992                free_context(mm->context);
 993                spin_unlock(&srmmu_context_spinlock);
 994                mm->context = NO_CONTEXT;
 995        }
 996}
 997
 998/* Init various srmmu chip types. */
 999static void __init srmmu_is_bad(void)
1000{
1001        prom_printf("Could not determine SRMMU chip type.\n");
1002        prom_halt();
1003}
1004
1005static void __init init_vac_layout(void)
1006{
1007        phandle nd;
1008        int cache_lines;
1009        char node_str[128];
1010#ifdef CONFIG_SMP
1011        int cpu = 0;
1012        unsigned long max_size = 0;
1013        unsigned long min_line_size = 0x10000000;
1014#endif
1015
1016        nd = prom_getchild(prom_root_node);
1017        while ((nd = prom_getsibling(nd)) != 0) {
1018                prom_getstring(nd, "device_type", node_str, sizeof(node_str));
1019                if (!strcmp(node_str, "cpu")) {
1020                        vac_line_size = prom_getint(nd, "cache-line-size");
1021                        if (vac_line_size == -1) {
1022                                prom_printf("can't determine cache-line-size, halting.\n");
1023                                prom_halt();
1024                        }
1025                        cache_lines = prom_getint(nd, "cache-nlines");
1026                        if (cache_lines == -1) {
1027                                prom_printf("can't determine cache-nlines, halting.\n");
1028                                prom_halt();
1029                        }
1030
1031                        vac_cache_size = cache_lines * vac_line_size;
1032#ifdef CONFIG_SMP
1033                        if (vac_cache_size > max_size)
1034                                max_size = vac_cache_size;
1035                        if (vac_line_size < min_line_size)
1036                                min_line_size = vac_line_size;
1037                        //FIXME: cpus not contiguous!!
1038                        cpu++;
1039                        if (cpu >= nr_cpu_ids || !cpu_online(cpu))
1040                                break;
1041#else
1042                        break;
1043#endif
1044                }
1045        }
1046        if (nd == 0) {
1047                prom_printf("No CPU nodes found, halting.\n");
1048                prom_halt();
1049        }
1050#ifdef CONFIG_SMP
1051        vac_cache_size = max_size;
1052        vac_line_size = min_line_size;
1053#endif
1054        printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
1055               (int)vac_cache_size, (int)vac_line_size);
1056}
1057
1058static void __cpuinit poke_hypersparc(void)
1059{
1060        volatile unsigned long clear;
1061        unsigned long mreg = srmmu_get_mmureg();
1062
1063        hyper_flush_unconditional_combined();
1064
1065        mreg &= ~(HYPERSPARC_CWENABLE);
1066        mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
1067        mreg |= (HYPERSPARC_CMODE);
1068
1069        srmmu_set_mmureg(mreg);
1070
1071#if 0 /* XXX I think this is bad news... -DaveM */
1072        hyper_clear_all_tags();
1073#endif
1074
1075        put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
1076        hyper_flush_whole_icache();
1077        clear = srmmu_get_faddr();
1078        clear = srmmu_get_fstatus();
1079}
1080
1081static const struct sparc32_cachetlb_ops hypersparc_ops = {
1082        .cache_all      = hypersparc_flush_cache_all,
1083        .cache_mm       = hypersparc_flush_cache_mm,
1084        .cache_page     = hypersparc_flush_cache_page,
1085        .cache_range    = hypersparc_flush_cache_range,
1086        .tlb_all        = hypersparc_flush_tlb_all,
1087        .tlb_mm         = hypersparc_flush_tlb_mm,
1088        .tlb_page       = hypersparc_flush_tlb_page,
1089        .tlb_range      = hypersparc_flush_tlb_range,
1090        .page_to_ram    = hypersparc_flush_page_to_ram,
1091        .sig_insns      = hypersparc_flush_sig_insns,
1092        .page_for_dma   = hypersparc_flush_page_for_dma,
1093};
1094
1095static void __init init_hypersparc(void)
1096{
1097        srmmu_name = "ROSS HyperSparc";
1098        srmmu_modtype = HyperSparc;
1099
1100        init_vac_layout();
1101
1102        is_hypersparc = 1;
1103        sparc32_cachetlb_ops = &hypersparc_ops;
1104
1105        poke_srmmu = poke_hypersparc;
1106
1107        hypersparc_setup_blockops();
1108}
1109
1110static void __cpuinit poke_swift(void)
1111{
1112        unsigned long mreg;
1113
1114        /* Clear any crap from the cache or else... */
1115        swift_flush_cache_all();
1116
1117        /* Enable I & D caches */
1118        mreg = srmmu_get_mmureg();
1119        mreg |= (SWIFT_IE | SWIFT_DE);
1120        /*
1121         * The Swift branch folding logic is completely broken.  At
 1122         * trap time, if things are just right, it can mistakenly
1123         * think that a trap is coming from kernel mode when in fact
1124         * it is coming from user mode (it mis-executes the branch in
1125         * the trap code).  So you see things like crashme completely
1126         * hosing your machine which is completely unacceptable.  Turn
1127         * this shit off... nice job Fujitsu.
1128         */
1129        mreg &= ~(SWIFT_BF);
1130        srmmu_set_mmureg(mreg);
1131}
1132
1133static const struct sparc32_cachetlb_ops swift_ops = {
1134        .cache_all      = swift_flush_cache_all,
1135        .cache_mm       = swift_flush_cache_mm,
1136        .cache_page     = swift_flush_cache_page,
1137        .cache_range    = swift_flush_cache_range,
1138        .tlb_all        = swift_flush_tlb_all,
1139        .tlb_mm         = swift_flush_tlb_mm,
1140        .tlb_page       = swift_flush_tlb_page,
1141        .tlb_range      = swift_flush_tlb_range,
1142        .page_to_ram    = swift_flush_page_to_ram,
1143        .sig_insns      = swift_flush_sig_insns,
1144        .page_for_dma   = swift_flush_page_for_dma,
1145};
1146
1147#define SWIFT_MASKID_ADDR  0x10003018
1148static void __init init_swift(void)
1149{
1150        unsigned long swift_rev;
1151
1152        __asm__ __volatile__("lda [%1] %2, %0\n\t"
1153                             "srl %0, 0x18, %0\n\t" :
1154                             "=r" (swift_rev) :
1155                             "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
1156        srmmu_name = "Fujitsu Swift";
1157        switch (swift_rev) {
1158        case 0x11:
1159        case 0x20:
1160        case 0x23:
1161        case 0x30:
1162                srmmu_modtype = Swift_lots_o_bugs;
1163                hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
1164                /*
 1165                 * this hardware bug... really braindamaged stuff going
1166                 * this hardware bug... really braindamage stuff going
1167                 * on here.  However I think we can find a way to avoid
1168                 * all of the workaround overhead under Linux.  Basically,
1169                 * any page fault can cause kernel pages to become user
1170                 * accessible (the mmu gets confused and clears some of
1171                 * the ACC bits in kernel ptes).  Aha, sounds pretty
1172                 * horrible eh?  But wait, after extensive testing it appears
1173                 * that if you use pgd_t level large kernel pte's (like the
1174                 * 4MB pages on the Pentium) the bug does not get tripped
1175                 * at all.  This avoids almost all of the major overhead.
1176                 * Welcome to a world where your vendor tells you to,
1177                 * "apply this kernel patch" instead of "sorry for the
1178                 * broken hardware, send it back and we'll give you
1179                 * properly functioning parts"
1180                 */
1181                break;
1182        case 0x25:
1183        case 0x31:
1184                srmmu_modtype = Swift_bad_c;
1185                hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
1186                /*
1187                 * You see Sun allude to this hardware bug but never
1188                 * admit things directly, they'll say things like,
1189                 * "the Swift chip cache problems" or similar.
1190                 */
1191                break;
1192        default:
1193                srmmu_modtype = Swift_ok;
1194                break;
1195        }
1196
1197        sparc32_cachetlb_ops = &swift_ops;
1198        flush_page_for_dma_global = 0;
1199
1200        /*
1201         * Are you now convinced that the Swift is one of the
1202         * biggest VLSI abortions of all time?  Bravo Fujitsu!
1203         * Fujitsu, the !#?!%$'d up processor people.  I bet if
1204         * you examined the microcode of the Swift you'd find
1205         * XXX's all over the place.
1206         */
1207        poke_srmmu = poke_swift;
1208}
1209
1210static void turbosparc_flush_cache_all(void)
1211{
1212        flush_user_windows();
1213        turbosparc_idflash_clear();
1214}
1215
1216static void turbosparc_flush_cache_mm(struct mm_struct *mm)
1217{
1218        FLUSH_BEGIN(mm)
1219        flush_user_windows();
1220        turbosparc_idflash_clear();
1221        FLUSH_END
1222}
1223
1224static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1225{
1226        FLUSH_BEGIN(vma->vm_mm)
1227        flush_user_windows();
1228        turbosparc_idflash_clear();
1229        FLUSH_END
1230}
1231
1232static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1233{
1234        FLUSH_BEGIN(vma->vm_mm)
1235        flush_user_windows();
1236        if (vma->vm_flags & VM_EXEC)
1237                turbosparc_flush_icache();
1238        turbosparc_flush_dcache();
1239        FLUSH_END
1240}
1241
1242/* TurboSparc is copy-back, if we turn it on, but this does not work. */
1243static void turbosparc_flush_page_to_ram(unsigned long page)
1244{
1245#ifdef TURBOSPARC_WRITEBACK
1246        volatile unsigned long clear;
1247
1248        if (srmmu_probe(page))
1249                turbosparc_flush_page_cache(page);
1250        clear = srmmu_get_fstatus();
1251#endif
1252}
1253
1254static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
1255{
1256}
1257
1258static void turbosparc_flush_page_for_dma(unsigned long page)
1259{
1260        turbosparc_flush_dcache();
1261}
1262
1263static void turbosparc_flush_tlb_all(void)
1264{
1265        srmmu_flush_whole_tlb();
1266}
1267
1268static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
1269{
1270        FLUSH_BEGIN(mm)
1271        srmmu_flush_whole_tlb();
1272        FLUSH_END
1273}
1274
1275static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1276{
1277        FLUSH_BEGIN(vma->vm_mm)
1278        srmmu_flush_whole_tlb();
1279        FLUSH_END
1280}
1281
1282static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1283{
1284        FLUSH_BEGIN(vma->vm_mm)
1285        srmmu_flush_whole_tlb();
1286        FLUSH_END
1287}
1288
1289
1290static void __cpuinit poke_turbosparc(void)
1291{
1292        unsigned long mreg = srmmu_get_mmureg();
1293        unsigned long ccreg;
1294
1295        /* Clear any crap from the cache or else... */
1296        turbosparc_flush_cache_all();
1297        /* Temporarily disable I & D caches */
1298        mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
1299        mreg &= ~(TURBOSPARC_PCENABLE);         /* Don't check parity */
1300        srmmu_set_mmureg(mreg);
1301
1302        ccreg = turbosparc_get_ccreg();
1303
1304#ifdef TURBOSPARC_WRITEBACK
1305        ccreg |= (TURBOSPARC_SNENABLE);         /* Do DVMA snooping in Dcache */
1306        ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
1307                        /* Write-back D-cache, emulate VLSI
1308                         * abortion number three, not number one */
1309#else
1310        /* For now let's play safe, optimize later */
1311        ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
1312                        /* Do DVMA snooping in Dcache, Write-thru D-cache */
1313        ccreg &= ~(TURBOSPARC_uS2);
1314                        /* Emulate VLSI abortion number three, not number one */
1315#endif
1316
1317        switch (ccreg & 7) {
1318        case 0: /* No SE cache */
1319        case 7: /* Test mode */
1320                break;
1321        default:
1322                ccreg |= (TURBOSPARC_SCENABLE);
1323        }
1324        turbosparc_set_ccreg(ccreg);
1325
1326        mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
1327        mreg |= (TURBOSPARC_ICSNOOP);           /* Icache snooping on */
1328        srmmu_set_mmureg(mreg);
1329}
1330
1331static const struct sparc32_cachetlb_ops turbosparc_ops = {
1332        .cache_all      = turbosparc_flush_cache_all,
1333        .cache_mm       = turbosparc_flush_cache_mm,
1334        .cache_page     = turbosparc_flush_cache_page,
1335        .cache_range    = turbosparc_flush_cache_range,
1336        .tlb_all        = turbosparc_flush_tlb_all,
1337        .tlb_mm         = turbosparc_flush_tlb_mm,
1338        .tlb_page       = turbosparc_flush_tlb_page,
1339        .tlb_range      = turbosparc_flush_tlb_range,
1340        .page_to_ram    = turbosparc_flush_page_to_ram,
1341        .sig_insns      = turbosparc_flush_sig_insns,
1342        .page_for_dma   = turbosparc_flush_page_for_dma,
1343};
1344
1345static void __init init_turbosparc(void)
1346{
1347        srmmu_name = "Fujitsu TurboSparc";
1348        srmmu_modtype = TurboSparc;
1349        sparc32_cachetlb_ops = &turbosparc_ops;
1350        poke_srmmu = poke_turbosparc;
1351}
1352
1353static void __cpuinit poke_tsunami(void)
1354{
1355        unsigned long mreg = srmmu_get_mmureg();
1356
1357        tsunami_flush_icache();
1358        tsunami_flush_dcache();
1359        mreg &= ~TSUNAMI_ITD;
1360        mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
1361        srmmu_set_mmureg(mreg);
1362}
1363
1364static const struct sparc32_cachetlb_ops tsunami_ops = {
1365        .cache_all      = tsunami_flush_cache_all,
1366        .cache_mm       = tsunami_flush_cache_mm,
1367        .cache_page     = tsunami_flush_cache_page,
1368        .cache_range    = tsunami_flush_cache_range,
1369        .tlb_all        = tsunami_flush_tlb_all,
1370        .tlb_mm         = tsunami_flush_tlb_mm,
1371        .tlb_page       = tsunami_flush_tlb_page,
1372        .tlb_range      = tsunami_flush_tlb_range,
1373        .page_to_ram    = tsunami_flush_page_to_ram,
1374        .sig_insns      = tsunami_flush_sig_insns,
1375        .page_for_dma   = tsunami_flush_page_for_dma,
1376};
1377
1378static void __init init_tsunami(void)
1379{
1380        /*
1381         * Tsunami's pretty sane, Sun and TI actually got it
1382         * somewhat right this time.  Fujitsu should have
1383         * taken some lessons from them.
1384         */
1385
1386        srmmu_name = "TI Tsunami";
1387        srmmu_modtype = Tsunami;
1388        sparc32_cachetlb_ops = &tsunami_ops;
1389        poke_srmmu = poke_tsunami;
1390
1391        tsunami_setup_blockops();
1392}
1393
1394static void __cpuinit poke_viking(void)
1395{
1396        unsigned long mreg = srmmu_get_mmureg();
1397        static int smp_catch;
1398
1399        if (viking_mxcc_present) {
1400                unsigned long mxcc_control = mxcc_get_creg();
1401
1402                mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
1403                mxcc_control &= ~(MXCC_CTL_RRC);
1404                mxcc_set_creg(mxcc_control);
1405
1406                /*
1407                 * We don't need memory parity checks.
1408                 * XXX This is a mess, have to dig out later. ecd.
1409                viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
1410                 */
1411
1412                /* We do cache ptables on MXCC. */
1413                mreg |= VIKING_TCENABLE;
1414        } else {
1415                unsigned long bpreg;
1416
1417                mreg &= ~(VIKING_TCENABLE);
1418                if (smp_catch++) {
1419                        /* Must disable mixed-cmd mode here for other cpu's. */
1420                        bpreg = viking_get_bpreg();
1421                        bpreg &= ~(VIKING_ACTION_MIX);
1422                        viking_set_bpreg(bpreg);
1423
1424                        /* Just in case PROM does something funny. */
1425                        msi_set_sync();
1426                }
1427        }
1428
1429        mreg |= VIKING_SPENABLE;
1430        mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
1431        mreg |= VIKING_SBENABLE;
1432        mreg &= ~(VIKING_ACENABLE);
1433        srmmu_set_mmureg(mreg);
1434}
1435
1436static struct sparc32_cachetlb_ops viking_ops = {
1437        .cache_all      = viking_flush_cache_all,
1438        .cache_mm       = viking_flush_cache_mm,
1439        .cache_page     = viking_flush_cache_page,
1440        .cache_range    = viking_flush_cache_range,
1441        .tlb_all        = viking_flush_tlb_all,
1442        .tlb_mm         = viking_flush_tlb_mm,
1443        .tlb_page       = viking_flush_tlb_page,
1444        .tlb_range      = viking_flush_tlb_range,
1445        .page_to_ram    = viking_flush_page_to_ram,
1446        .sig_insns      = viking_flush_sig_insns,
1447        .page_for_dma   = viking_flush_page_for_dma,
1448};
1449
1450#ifdef CONFIG_SMP
1451/* On sun4d the cpu broadcasts local TLB flushes, so we can just
1452 * perform the local TLB flush and all the other cpus will see it.
1453 * But, unfortunately, there is a bug in the sun4d XBUS backplane
1454 * that requires that we add some synchronization to these flushes.
1455 *
1456 * The bug is that the fifo which keeps track of all the pending TLB
1457 * broadcasts in the system is an entry or two too small, so if we
1458 * have too many going at once we'll overflow that fifo and lose a TLB
1459 * flush resulting in corruption.
1460 *
1461 * Our workaround is to take a global spinlock around the TLB flushes,
 1462 * which guarantees we won't ever have too many pending.  It's a big
1463 * hammer, but a semaphore like system to make sure we only have N TLB
1464 * flushes going at once will require SMP locking anyways so there's
1465 * no real value in trying any harder than this.
1466 */
1467static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
1468        .cache_all      = viking_flush_cache_all,
1469        .cache_mm       = viking_flush_cache_mm,
1470        .cache_page     = viking_flush_cache_page,
1471        .cache_range    = viking_flush_cache_range,
1472        .tlb_all        = sun4dsmp_flush_tlb_all,
1473        .tlb_mm         = sun4dsmp_flush_tlb_mm,
1474        .tlb_page       = sun4dsmp_flush_tlb_page,
1475        .tlb_range      = sun4dsmp_flush_tlb_range,
1476        .page_to_ram    = viking_flush_page_to_ram,
1477        .sig_insns      = viking_flush_sig_insns,
1478        .page_for_dma   = viking_flush_page_for_dma,
1479};
1480#endif
1481
1482static void __init init_viking(void)
1483{
1484        unsigned long mreg = srmmu_get_mmureg();
1485
1486        /* Ahhh, the viking.  SRMMU VLSI abortion number two... */
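            /* No MXCC second-level cache controller behind this Viking, so
             * leave the page tables uncached and flush per-page for DMA. */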
1487        if (mreg & VIKING_MMODE) {
1488                srmmu_name = "TI Viking";
1489                viking_mxcc_present = 0;
1490                msi_set_sync();
1491
1492                /*
1493                 * We need this to make sure old viking takes no hits
1494                 * on its cache for dma snoops, to work around the
1495                 * "load from non-cacheable memory" interrupt bug.
1496                 * This is only necessary because of the new way in
1497                 * which we use the IOMMU.
1498                 */
1499                viking_ops.page_for_dma = viking_flush_page;
1500#ifdef CONFIG_SMP
1501                viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
1502#endif
1503                flush_page_for_dma_global = 0;
1504        } else {
1505                srmmu_name = "TI Viking/MXCC";
1506                viking_mxcc_present = 1;
1507                srmmu_cache_pagetables = 1;
1508        }
1509
1510        sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1511                &viking_ops;
1512#ifdef CONFIG_SMP
1513        if (sparc_cpu_model == sun4d)
1514                sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1515                        &viking_sun4d_smp_ops;
1516#endif
1517
1518        poke_srmmu = poke_viking;
1519}
1520
1521/* Probe for the srmmu chip version. */
1522static void __init get_srmmu_type(void)
1523{
1524        unsigned long mreg, psr;
1525        unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
1526
1527        srmmu_modtype = SRMMU_INVAL_MOD;
1528        hwbug_bitmask = 0;
1529
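            /* Both the MMU control register and the PSR carry IMPL in
             * bits 31:28 and VER in bits 27:24. */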
1530        mreg = srmmu_get_mmureg(); psr = get_psr();
1531        mod_typ = (mreg & 0xf0000000) >> 28;
1532        mod_rev = (mreg & 0x0f000000) >> 24;
1533        psr_typ = (psr >> 28) & 0xf;
1534        psr_vers = (psr >> 24) & 0xf;
1535
1536        /* First, check for sparc-leon. */
1537        if (sparc_cpu_model == sparc_leon) {
1538                init_leon();
1539                return;
1540        }
1541
1542        /* Second, check for HyperSparc or Cypress. */
1543        if (mod_typ == 1) {
1544                switch (mod_rev) {
1545                case 7:
1546                        /* UP or MP Hypersparc */
1547                        init_hypersparc();
1548                        break;
1549                case 0:
1550                case 2:
1551                case 10:
1552                case 11:
1553                case 12:
1554                case 13:
1555                case 14:
1556                case 15:
1557                default:
1558                        prom_printf("Sparc-Linux Cypress support no longer exists.\n");
1559                        prom_halt();
1560                        break;
1561                }
1562                return;
1563        }
1564
1565        /* Now Fujitsu TurboSparc. It may be running in Swift
1566         * emulation mode, so we check for that later...
1567         */
1568        if (psr_typ == 0 && psr_vers == 5) {
1569                init_turbosparc();
1570                return;
1571        }
1572
1573        /* Next check for Fujitsu Swift. */
1574        if (psr_typ == 0 && psr_vers == 4) {
1575                phandle cpunode;
1576                char node_str[128];
1577
1578                /* Check whether it is really a TurboSparc emulating a Swift... */
1579                cpunode = prom_getchild(prom_root_node);
1580                while ((cpunode = prom_getsibling(cpunode)) != 0) {
1581                        prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
1582                        if (!strcmp(node_str, "cpu")) {
1583                                if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
1584                                    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
1585                                        init_turbosparc();
1586                                        return;
1587                                }
1588                                break;
1589                        }
1590                }
1591
1592                init_swift();
1593                return;
1594        }
1595
1596        /* Now the Viking family of srmmu. */
1597        if (psr_typ == 4 &&
1598           ((psr_vers == 0) ||
1599            ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
1600                init_viking();
1601                return;
1602        }
1603
1604        /* Finally the Tsunami. */
1605        if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
1606                init_tsunami();
1607                return;
1608        }
1609
1610        /* Oh well */
1611        srmmu_is_bad();
1612}
1613
1614#ifdef CONFIG_SMP
1615/* SMP cross-call wrappers around the chip-local cache/TLB routines. */
1616static void smp_flush_page_for_dma(unsigned long page)
1617{
1618        xc1((smpfunc_t) local_ops->page_for_dma, page);
1619        local_ops->page_for_dma(page);
1620}
1621
1622static void smp_flush_cache_all(void)
1623{
1624        xc0((smpfunc_t) local_ops->cache_all);
1625        local_ops->cache_all();
1626}
1627
1628static void smp_flush_tlb_all(void)
1629{
1630        xc0((smpfunc_t) local_ops->tlb_all);
1631        local_ops->tlb_all();
1632}
1633
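    /* The mm/vma flush wrappers below only cross-call when some other cpu
     * still has the mm in its cpumask; otherwise only the local flush runs. */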
1634static void smp_flush_cache_mm(struct mm_struct *mm)
1635{
1636        if (mm->context != NO_CONTEXT) {
1637                cpumask_t cpu_mask;
1638                cpumask_copy(&cpu_mask, mm_cpumask(mm));
1639                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1640                if (!cpumask_empty(&cpu_mask))
1641                        xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
1642                local_ops->cache_mm(mm);
1643        }
1644}
1645
1646static void smp_flush_tlb_mm(struct mm_struct *mm)
1647{
1648        if (mm->context != NO_CONTEXT) {
1649                cpumask_t cpu_mask;
1650                cpumask_copy(&cpu_mask, mm_cpumask(mm));
1651                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1652                if (!cpumask_empty(&cpu_mask)) {
1653                        xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
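                                /* If we are the only user and this is our
                                 * active mm, shrink the cpumask to this cpu
                                 * so later flushes skip the cross-call. */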
1654                        if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
1655                                cpumask_copy(mm_cpumask(mm),
1656                                             cpumask_of(smp_processor_id()));
1657                }
1658                local_ops->tlb_mm(mm);
1659        }
1660}
1661
1662static void smp_flush_cache_range(struct vm_area_struct *vma,
1663                                  unsigned long start,
1664                                  unsigned long end)
1665{
1666        struct mm_struct *mm = vma->vm_mm;
1667
1668        if (mm->context != NO_CONTEXT) {
1669                cpumask_t cpu_mask;
1670                cpumask_copy(&cpu_mask, mm_cpumask(mm));
1671                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1672                if (!cpumask_empty(&cpu_mask))
1673                        xc3((smpfunc_t) local_ops->cache_range,
1674                            (unsigned long) vma, start, end);
1675                local_ops->cache_range(vma, start, end);
1676        }
1677}
1678
1679static void smp_flush_tlb_range(struct vm_area_struct *vma,
1680                                unsigned long start,
1681                                unsigned long end)
1682{
1683        struct mm_struct *mm = vma->vm_mm;
1684
1685        if (mm->context != NO_CONTEXT) {
1686                cpumask_t cpu_mask;
1687                cpumask_copy(&cpu_mask, mm_cpumask(mm));
1688                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1689                if (!cpumask_empty(&cpu_mask))
1690                        xc3((smpfunc_t) local_ops->tlb_range,
1691                            (unsigned long) vma, start, end);
1692                local_ops->tlb_range(vma, start, end);
1693        }
1694}
1695
1696static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1697{
1698        struct mm_struct *mm = vma->vm_mm;
1699
1700        if (mm->context != NO_CONTEXT) {
1701                cpumask_t cpu_mask;
1702                cpumask_copy(&cpu_mask, mm_cpumask(mm));
1703                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1704                if (!cpumask_empty(&cpu_mask))
1705                        xc2((smpfunc_t) local_ops->cache_page,
1706                            (unsigned long) vma, page);
1707                local_ops->cache_page(vma, page);
1708        }
1709}
1710
1711static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1712{
1713        struct mm_struct *mm = vma->vm_mm;
1714
1715        if (mm->context != NO_CONTEXT) {
1716                cpumask_t cpu_mask;
1717                cpumask_copy(&cpu_mask, mm_cpumask(mm));
1718                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1719                if (!cpumask_empty(&cpu_mask))
1720                        xc2((smpfunc_t) local_ops->tlb_page,
1721                            (unsigned long) vma, page);
1722                local_ops->tlb_page(vma, page);
1723        }
1724}
1725
1726static void smp_flush_page_to_ram(unsigned long page)
1727{
1728        /* Current theory is that those who call this are the ones
1729         * who have just dirtied their cache with the page's contents
1730         * in kernel space, therefore we only run this on the local cpu.
1731         *
1732         * XXX This experiment failed, research further... -DaveM
1733         */
1734#if 1
1735        xc1((smpfunc_t) local_ops->page_to_ram, page);
1736#endif
1737        local_ops->page_to_ram(page);
1738}
1739
1740static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
1741{
1742        cpumask_t cpu_mask;
1743        cpumask_copy(&cpu_mask, mm_cpumask(mm));
1744        cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1745        if (!cpumask_empty(&cpu_mask))
1746                xc2((smpfunc_t) local_ops->sig_insns,
1747                    (unsigned long) mm, insn_addr);
1748        local_ops->sig_insns(mm, insn_addr);
1749}
1750
1751static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
1752        .cache_all      = smp_flush_cache_all,
1753        .cache_mm       = smp_flush_cache_mm,
1754        .cache_page     = smp_flush_cache_page,
1755        .cache_range    = smp_flush_cache_range,
1756        .tlb_all        = smp_flush_tlb_all,
1757        .tlb_mm         = smp_flush_tlb_mm,
1758        .tlb_page       = smp_flush_tlb_page,
1759        .tlb_range      = smp_flush_tlb_range,
1760        .page_to_ram    = smp_flush_page_to_ram,
1761        .sig_insns      = smp_flush_sig_insns,
1762        .page_for_dma   = smp_flush_page_for_dma,
1763};
1764#endif
1765
1766/* Load up routines and constants for sun4m and sun4d mmu */
1767void __init load_mmu(void)
1768{
1769        extern void ld_mmu_iommu(void);
1770        extern void ld_mmu_iounit(void);
1771
1772        /* Functions */
1773        get_srmmu_type();
1774
1775#ifdef CONFIG_SMP
1776        /* El switcheroo... */
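            /* local_ops keeps the chip-specific routines that get_srmmu_type()
             * just installed; the smp_* wrappers above cross-call into them. */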
1777        local_ops = sparc32_cachetlb_ops;
1778
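            /* sun4d broadcasts TLB flushes in hardware (see viking_sun4d_smp_ops
             * above), and LEON likewise uses its local TLB routines directly,
             * so skip the cross-call wrappers for TLB ops on these machines. */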
1779        if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
1780                smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
1781                smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
1782                smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
1783                smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
1784        }
1785
1786        if (poke_srmmu == poke_viking) {
1787                /* Avoid unnecessary cross calls. */
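                    /* Viking caches are physically tagged and snoop the bus,
                     * so the chip-local cache/DMA routines already cover the
                     * other cpus. */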
1788                smp_cachetlb_ops.cache_all = local_ops->cache_all;
1789                smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
1790                smp_cachetlb_ops.cache_range = local_ops->cache_range;
1791                smp_cachetlb_ops.cache_page = local_ops->cache_page;
1792
1793                smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
1794                smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
1795                smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
1796        }
1797
1798        /* It really is const after this point. */
1799        sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1800                &smp_cachetlb_ops;
1801#endif
1802
1803        if (sparc_cpu_model == sun4d)
1804                ld_mmu_iounit();
1805        else
1806                ld_mmu_iommu();
1807#ifdef CONFIG_SMP
1808        if (sparc_cpu_model == sun4d)
1809                sun4d_init_smp();
1810        else if (sparc_cpu_model == sparc_leon)
1811                leon_init_smp();
1812        else
1813                sun4m_init_smp();
1814#endif
1815}
1816