linux/arch/mips/mm/c-r4k.c
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
   7 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
   8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
   9 */
  10#include <linux/hardirq.h>
  11#include <linux/init.h>
  12#include <linux/highmem.h>
  13#include <linux/kernel.h>
  14#include <linux/linkage.h>
  15#include <linux/sched.h>
  16#include <linux/smp.h>
  17#include <linux/mm.h>
  18#include <linux/module.h>
  19#include <linux/bitops.h>
  20
  21#include <asm/bcache.h>
  22#include <asm/bootinfo.h>
  23#include <asm/cache.h>
  24#include <asm/cacheops.h>
  25#include <asm/cpu.h>
  26#include <asm/cpu-features.h>
  27#include <asm/io.h>
  28#include <asm/page.h>
  29#include <asm/pgtable.h>
  30#include <asm/r4kcache.h>
  31#include <asm/sections.h>
  32#include <asm/system.h>
  33#include <asm/mmu_context.h>
  34#include <asm/war.h>
  35#include <asm/cacheflush.h> /* for run_uncached() */
  36
  37
  38/*
   39 * Special variant of smp_call_function() for use by cache functions:
  40 *
  41 *  o No return value
  42 *  o collapses to normal function call on UP kernels
  43 *  o collapses to normal function call on systems with a single shared
  44 *    primary cache.
  45 *  o doesn't disable interrupts on the local CPU
  46 */
  47static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
  48{
  49        preempt_disable();
  50
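             /*
              * MIPS MT SMP and SMTC kernels run on a single core with shared
              * primary caches (the "single shared primary cache" case noted
              * above), so the local call to func() below is sufficient and no
              * cross-call is needed.
              */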
  51#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
  52        smp_call_function(func, info, 1);
  53#endif
  54        func(info);
  55        preempt_enable();
  56}
  57
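     /*
      * Index-type cacheops only act on the cache of the CPU executing them,
      * so on CMP systems with per-core caches they are not treated as a safe
      * shortcut for flushing an address range (see the DMA routines below).
      */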
  58#if defined(CONFIG_MIPS_CMP)
  59#define cpu_has_safe_index_cacheops 0
  60#else
  61#define cpu_has_safe_index_cacheops 1
  62#endif
  63
  64/*
  65 * Must die.
  66 */
  67static unsigned long icache_size __read_mostly;
  68static unsigned long dcache_size __read_mostly;
  69static unsigned long scache_size __read_mostly;
  70
  71/*
   72 * Dummy cache handling routines for machines without board caches
  73 */
  74static void cache_noop(void) {}
  75
  76static struct bcache_ops no_sc_ops = {
  77        .bc_enable = (void *)cache_noop,
  78        .bc_disable = (void *)cache_noop,
  79        .bc_wback_inv = (void *)cache_noop,
  80        .bc_inv = (void *)cache_noop
  81};
  82
  83struct bcache_ops *bcops = &no_sc_ops;
  84
  85#define cpu_is_r4600_v1_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002010)
  86#define cpu_is_r4600_v2_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002020)
  87
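     /*
      * Workaround for R4600 Hit cacheop errata: on V2.x a dummy uncached load
      * from CKSEG1 is issued so that outstanding memory accesses complete
      * before the Hit op; on V1.x the op is simply padded with nops.
      */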
  88#define R4600_HIT_CACHEOP_WAR_IMPL                                      \
  89do {                                                                    \
  90        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())            \
  91                *(volatile unsigned long *)CKSEG1;                      \
  92        if (R4600_V1_HIT_CACHEOP_WAR)                                   \
  93                __asm__ __volatile__("nop;nop;nop;nop");                \
  94} while (0)
  95
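     /*
      * The r4k_blast_* function pointers below are selected at boot by the
      * corresponding *_setup() routines according to the probed cache line
      * size; they default to cache_noop when the cache is absent.
      */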
  96static void (*r4k_blast_dcache_page)(unsigned long addr);
  97
  98static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
  99{
 100        R4600_HIT_CACHEOP_WAR_IMPL;
 101        blast_dcache32_page(addr);
 102}
 103
 104static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
 105{
 106        R4600_HIT_CACHEOP_WAR_IMPL;
 107        blast_dcache64_page(addr);
 108}
 109
 110static void __cpuinit r4k_blast_dcache_page_setup(void)
 111{
 112        unsigned long  dc_lsize = cpu_dcache_line_size();
 113
 114        if (dc_lsize == 0)
 115                r4k_blast_dcache_page = (void *)cache_noop;
 116        else if (dc_lsize == 16)
 117                r4k_blast_dcache_page = blast_dcache16_page;
 118        else if (dc_lsize == 32)
 119                r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
 120        else if (dc_lsize == 64)
 121                r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
 122}
 123
 124static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
 125
 126static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
 127{
 128        unsigned long dc_lsize = cpu_dcache_line_size();
 129
 130        if (dc_lsize == 0)
 131                r4k_blast_dcache_page_indexed = (void *)cache_noop;
 132        else if (dc_lsize == 16)
 133                r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
 134        else if (dc_lsize == 32)
 135                r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
 136        else if (dc_lsize == 64)
 137                r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
 138}
 139
 140static void (* r4k_blast_dcache)(void);
 141
 142static void __cpuinit r4k_blast_dcache_setup(void)
 143{
 144        unsigned long dc_lsize = cpu_dcache_line_size();
 145
 146        if (dc_lsize == 0)
 147                r4k_blast_dcache = (void *)cache_noop;
 148        else if (dc_lsize == 16)
 149                r4k_blast_dcache = blast_dcache16;
 150        else if (dc_lsize == 32)
 151                r4k_blast_dcache = blast_dcache32;
 152        else if (dc_lsize == 64)
 153                r4k_blast_dcache = blast_dcache64;
 154}
 155
 156/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
 157#define JUMP_TO_ALIGN(order) \
 158        __asm__ __volatile__( \
 159                "b\t1f\n\t" \
 160                ".align\t" #order "\n\t" \
 161                "1:\n\t" \
 162                )
 163#define CACHE32_UNROLL32_ALIGN  JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
 164#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
 165
 166static inline void blast_r4600_v1_icache32(void)
 167{
 168        unsigned long flags;
 169
 170        local_irq_save(flags);
 171        blast_icache32();
 172        local_irq_restore(flags);
 173}
 174
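     /*
      * TX49 workaround: align the code on a 2kB boundary and invalidate the
      * I-cache in two passes, each pass touching only the 1kB chunks that the
      * currently executing loop does not occupy.
      */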
 175static inline void tx49_blast_icache32(void)
 176{
 177        unsigned long start = INDEX_BASE;
 178        unsigned long end = start + current_cpu_data.icache.waysize;
 179        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
 180        unsigned long ws_end = current_cpu_data.icache.ways <<
 181                               current_cpu_data.icache.waybit;
 182        unsigned long ws, addr;
 183
 184        CACHE32_UNROLL32_ALIGN2;
 185        /* I'm in even chunk.  blast odd chunks */
 186        for (ws = 0; ws < ws_end; ws += ws_inc)
 187                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
 188                        cache32_unroll32(addr|ws, Index_Invalidate_I);
 189        CACHE32_UNROLL32_ALIGN;
 190        /* I'm in odd chunk.  blast even chunks */
 191        for (ws = 0; ws < ws_end; ws += ws_inc)
 192                for (addr = start; addr < end; addr += 0x400 * 2)
 193                        cache32_unroll32(addr|ws, Index_Invalidate_I);
 194}
 195
 196static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
 197{
 198        unsigned long flags;
 199
 200        local_irq_save(flags);
 201        blast_icache32_page_indexed(page);
 202        local_irq_restore(flags);
 203}
 204
 205static inline void tx49_blast_icache32_page_indexed(unsigned long page)
 206{
 207        unsigned long indexmask = current_cpu_data.icache.waysize - 1;
 208        unsigned long start = INDEX_BASE + (page & indexmask);
 209        unsigned long end = start + PAGE_SIZE;
 210        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
 211        unsigned long ws_end = current_cpu_data.icache.ways <<
 212                               current_cpu_data.icache.waybit;
 213        unsigned long ws, addr;
 214
 215        CACHE32_UNROLL32_ALIGN2;
 216        /* I'm in even chunk.  blast odd chunks */
 217        for (ws = 0; ws < ws_end; ws += ws_inc)
 218                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
 219                        cache32_unroll32(addr|ws, Index_Invalidate_I);
 220        CACHE32_UNROLL32_ALIGN;
 221        /* I'm in odd chunk.  blast even chunks */
 222        for (ws = 0; ws < ws_end; ws += ws_inc)
 223                for (addr = start; addr < end; addr += 0x400 * 2)
 224                        cache32_unroll32(addr|ws, Index_Invalidate_I);
 225}
 226
 227static void (* r4k_blast_icache_page)(unsigned long addr);
 228
 229static void __cpuinit r4k_blast_icache_page_setup(void)
 230{
 231        unsigned long ic_lsize = cpu_icache_line_size();
 232
 233        if (ic_lsize == 0)
 234                r4k_blast_icache_page = (void *)cache_noop;
 235        else if (ic_lsize == 16)
 236                r4k_blast_icache_page = blast_icache16_page;
 237        else if (ic_lsize == 32)
 238                r4k_blast_icache_page = blast_icache32_page;
 239        else if (ic_lsize == 64)
 240                r4k_blast_icache_page = blast_icache64_page;
 241}
 242
 243
 244static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
 245
 246static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
 247{
 248        unsigned long ic_lsize = cpu_icache_line_size();
 249
 250        if (ic_lsize == 0)
 251                r4k_blast_icache_page_indexed = (void *)cache_noop;
 252        else if (ic_lsize == 16)
 253                r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
 254        else if (ic_lsize == 32) {
 255                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
 256                        r4k_blast_icache_page_indexed =
 257                                blast_icache32_r4600_v1_page_indexed;
 258                else if (TX49XX_ICACHE_INDEX_INV_WAR)
 259                        r4k_blast_icache_page_indexed =
 260                                tx49_blast_icache32_page_indexed;
 261                else
 262                        r4k_blast_icache_page_indexed =
 263                                blast_icache32_page_indexed;
 264        } else if (ic_lsize == 64)
 265                r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
 266}
 267
 268static void (* r4k_blast_icache)(void);
 269
 270static void __cpuinit r4k_blast_icache_setup(void)
 271{
 272        unsigned long ic_lsize = cpu_icache_line_size();
 273
 274        if (ic_lsize == 0)
 275                r4k_blast_icache = (void *)cache_noop;
 276        else if (ic_lsize == 16)
 277                r4k_blast_icache = blast_icache16;
 278        else if (ic_lsize == 32) {
 279                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
 280                        r4k_blast_icache = blast_r4600_v1_icache32;
 281                else if (TX49XX_ICACHE_INDEX_INV_WAR)
 282                        r4k_blast_icache = tx49_blast_icache32;
 283                else
 284                        r4k_blast_icache = blast_icache32;
 285        } else if (ic_lsize == 64)
 286                r4k_blast_icache = blast_icache64;
 287}
 288
 289static void (* r4k_blast_scache_page)(unsigned long addr);
 290
 291static void __cpuinit r4k_blast_scache_page_setup(void)
 292{
 293        unsigned long sc_lsize = cpu_scache_line_size();
 294
 295        if (scache_size == 0)
 296                r4k_blast_scache_page = (void *)cache_noop;
 297        else if (sc_lsize == 16)
 298                r4k_blast_scache_page = blast_scache16_page;
 299        else if (sc_lsize == 32)
 300                r4k_blast_scache_page = blast_scache32_page;
 301        else if (sc_lsize == 64)
 302                r4k_blast_scache_page = blast_scache64_page;
 303        else if (sc_lsize == 128)
 304                r4k_blast_scache_page = blast_scache128_page;
 305}
 306
 307static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
 308
 309static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
 310{
 311        unsigned long sc_lsize = cpu_scache_line_size();
 312
 313        if (scache_size == 0)
 314                r4k_blast_scache_page_indexed = (void *)cache_noop;
 315        else if (sc_lsize == 16)
 316                r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
 317        else if (sc_lsize == 32)
 318                r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
 319        else if (sc_lsize == 64)
 320                r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
 321        else if (sc_lsize == 128)
 322                r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
 323}
 324
 325static void (* r4k_blast_scache)(void);
 326
 327static void __cpuinit r4k_blast_scache_setup(void)
 328{
 329        unsigned long sc_lsize = cpu_scache_line_size();
 330
 331        if (scache_size == 0)
 332                r4k_blast_scache = (void *)cache_noop;
 333        else if (sc_lsize == 16)
 334                r4k_blast_scache = blast_scache16;
 335        else if (sc_lsize == 32)
 336                r4k_blast_scache = blast_scache32;
 337        else if (sc_lsize == 64)
 338                r4k_blast_scache = blast_scache64;
 339        else if (sc_lsize == 128)
 340                r4k_blast_scache = blast_scache128;
 341}
 342
 343static inline void local_r4k___flush_cache_all(void * args)
 344{
 345#if defined(CONFIG_CPU_LOONGSON2)
 346        r4k_blast_scache();
 347        return;
 348#endif
 349        r4k_blast_dcache();
 350        r4k_blast_icache();
 351
 352        switch (current_cpu_type()) {
 353        case CPU_R4000SC:
 354        case CPU_R4000MC:
 355        case CPU_R4400SC:
 356        case CPU_R4400MC:
 357        case CPU_R10000:
 358        case CPU_R12000:
 359        case CPU_R14000:
 360                r4k_blast_scache();
 361        }
 362}
 363
 364static void r4k___flush_cache_all(void)
 365{
 366        r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
 367}
 368
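     /*
      * Report whether the mm may still have lines in the caches: on MT
      * SMP/SMTC kernels it counts as live if it holds a valid ASID on any
      * online CPU, otherwise only the local CPU's context matters.
      */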
 369static inline int has_valid_asid(const struct mm_struct *mm)
 370{
 371#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
 372        int i;
 373
 374        for_each_online_cpu(i)
 375                if (cpu_context(i, mm))
 376                        return 1;
 377
 378        return 0;
 379#else
 380        return cpu_context(smp_processor_id(), mm);
 381#endif
 382}
 383
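     /*
      * A vmap'ed range may alias pages that are also mapped elsewhere, so
      * these hooks simply blast the whole D-cache.
      */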
 384static void r4k__flush_cache_vmap(void)
 385{
 386        r4k_blast_dcache();
 387}
 388
 389static void r4k__flush_cache_vunmap(void)
 390{
 391        r4k_blast_dcache();
 392}
 393
 394static inline void local_r4k_flush_cache_range(void * args)
 395{
 396        struct vm_area_struct *vma = args;
 397        int exec = vma->vm_flags & VM_EXEC;
 398
 399        if (!(has_valid_asid(vma->vm_mm)))
 400                return;
 401
 402        r4k_blast_dcache();
 403        if (exec)
 404                r4k_blast_icache();
 405}
 406
 407static void r4k_flush_cache_range(struct vm_area_struct *vma,
 408        unsigned long start, unsigned long end)
 409{
 410        int exec = vma->vm_flags & VM_EXEC;
 411
 412        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
 413                r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
 414}
 415
 416static inline void local_r4k_flush_cache_mm(void * args)
 417{
 418        struct mm_struct *mm = args;
 419
 420        if (!has_valid_asid(mm))
 421                return;
 422
 423        /*
  424         * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
  425         * only flush the primary caches, while R10000 and R12000 behave sanely.
 426         * R4000SC and R4400SC indexed S-cache ops also invalidate primary
 427         * caches, so we can bail out early.
 428         */
 429        if (current_cpu_type() == CPU_R4000SC ||
 430            current_cpu_type() == CPU_R4000MC ||
 431            current_cpu_type() == CPU_R4400SC ||
 432            current_cpu_type() == CPU_R4400MC) {
 433                r4k_blast_scache();
 434                return;
 435        }
 436
 437        r4k_blast_dcache();
 438}
 439
 440static void r4k_flush_cache_mm(struct mm_struct *mm)
 441{
 442        if (!cpu_has_dc_aliases)
 443                return;
 444
 445        r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
 446}
 447
 448struct flush_cache_page_args {
 449        struct vm_area_struct *vma;
 450        unsigned long addr;
 451        unsigned long pfn;
 452};
 453
 454static inline void local_r4k_flush_cache_page(void *args)
 455{
 456        struct flush_cache_page_args *fcp_args = args;
 457        struct vm_area_struct *vma = fcp_args->vma;
 458        unsigned long addr = fcp_args->addr;
 459        struct page *page = pfn_to_page(fcp_args->pfn);
 460        int exec = vma->vm_flags & VM_EXEC;
 461        struct mm_struct *mm = vma->vm_mm;
 462        int map_coherent = 0;
 463        pgd_t *pgdp;
 464        pud_t *pudp;
 465        pmd_t *pmdp;
 466        pte_t *ptep;
 467        void *vaddr;
 468
 469        /*
  470         * If the mm owns no valid ASID yet, it cannot possibly have gotten
  471         * this page into the cache.
 472         */
 473        if (!has_valid_asid(mm))
 474                return;
 475
 476        addr &= PAGE_MASK;
 477        pgdp = pgd_offset(mm, addr);
 478        pudp = pud_offset(pgdp, addr);
 479        pmdp = pmd_offset(pudp, addr);
 480        ptep = pte_offset(pmdp, addr);
 481
 482        /*
 483         * If the page isn't marked valid, the page cannot possibly be
 484         * in the cache.
 485         */
 486        if (!(pte_present(*ptep)))
 487                return;
 488
 489        if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
 490                vaddr = NULL;
 491        else {
 492                /*
 493                 * Use kmap_coherent or kmap_atomic to do flushes for
  494                 * an ASID other than the current one.
 495                 */
 496                map_coherent = (cpu_has_dc_aliases &&
 497                                page_mapped(page) && !Page_dcache_dirty(page));
 498                if (map_coherent)
 499                        vaddr = kmap_coherent(page, addr);
 500                else
 501                        vaddr = kmap_atomic(page, KM_USER0);
 502                addr = (unsigned long)vaddr;
 503        }
 504
 505        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
 506                r4k_blast_dcache_page(addr);
 507                if (exec && !cpu_icache_snoops_remote_store)
 508                        r4k_blast_scache_page(addr);
 509        }
 510        if (exec) {
 511                if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
 512                        int cpu = smp_processor_id();
 513
 514                        if (cpu_context(cpu, mm) != 0)
 515                                drop_mmu_context(mm, cpu);
 516                } else
 517                        r4k_blast_icache_page(addr);
 518        }
 519
 520        if (vaddr) {
 521                if (map_coherent)
 522                        kunmap_coherent();
 523                else
 524                        kunmap_atomic(vaddr, KM_USER0);
 525        }
 526}
 527
 528static void r4k_flush_cache_page(struct vm_area_struct *vma,
 529        unsigned long addr, unsigned long pfn)
 530{
 531        struct flush_cache_page_args args;
 532
 533        args.vma = vma;
 534        args.addr = addr;
 535        args.pfn = pfn;
 536
 537        r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
 538}
 539
 540static inline void local_r4k_flush_data_cache_page(void * addr)
 541{
 542        r4k_blast_dcache_page((unsigned long) addr);
 543}
 544
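     /*
      * In atomic context we cannot send IPIs, so only the local D-cache is
      * flushed; otherwise the flush is broadcast to all CPUs.
      */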
 545static void r4k_flush_data_cache_page(unsigned long addr)
 546{
 547        if (in_atomic())
 548                local_r4k_flush_data_cache_page((void *)addr);
 549        else
 550                r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
 551}
 552
 553struct flush_icache_range_args {
 554        unsigned long start;
 555        unsigned long end;
 556};
 557
 558static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
 559{
 560        if (!cpu_has_ic_fills_f_dc) {
 561                if (end - start >= dcache_size) {
 562                        r4k_blast_dcache();
 563                } else {
 564                        R4600_HIT_CACHEOP_WAR_IMPL;
 565                        protected_blast_dcache_range(start, end);
 566                }
 567        }
 568
 569        if (end - start > icache_size)
 570                r4k_blast_icache();
 571        else
 572                protected_blast_icache_range(start, end);
 573}
 574
 575static inline void local_r4k_flush_icache_range_ipi(void *args)
 576{
 577        struct flush_icache_range_args *fir_args = args;
 578        unsigned long start = fir_args->start;
 579        unsigned long end = fir_args->end;
 580
 581        local_r4k_flush_icache_range(start, end);
 582}
 583
 584static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 585{
 586        struct flush_icache_range_args args;
 587
 588        args.start = start;
 589        args.end = end;
 590
 591        r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
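             /* Clear the instruction hazard before returning to the caller. */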
 592        instruction_hazard();
 593}
 594
 595#ifdef CONFIG_DMA_NONCOHERENT
 596
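     /*
      * Write back and invalidate a range around a DMA transfer.  When the
      * caches maintain the inclusion (subset) property, operating on the
      * secondary cache alone is sufficient; otherwise the primary caches are
      * flushed explicitly and the board cache is handled last.
      */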
 597static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 598{
 599        /* Catch bad driver code */
 600        BUG_ON(size == 0);
 601
 602        if (cpu_has_inclusive_pcaches) {
 603                if (size >= scache_size)
 604                        r4k_blast_scache();
 605                else
 606                        blast_scache_range(addr, addr + size);
 607                return;
 608        }
 609
 610        /*
 611         * Either no secondary cache or the available caches don't have the
  612         * subset property, so we have to flush the primary caches
  613         * explicitly.
 614         */
 615        if (cpu_has_safe_index_cacheops && size >= dcache_size) {
 616                r4k_blast_dcache();
 617        } else {
 618                R4600_HIT_CACHEOP_WAR_IMPL;
 619                blast_dcache_range(addr, addr + size);
 620        }
 621
 622        bc_wback_inv(addr, size);
 623}
 624
 625static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 626{
 627        /* Catch bad driver code */
 628        BUG_ON(size == 0);
 629
 630        if (cpu_has_inclusive_pcaches) {
 631                if (size >= scache_size)
 632                        r4k_blast_scache();
 633                else {
 634                        unsigned long lsize = cpu_scache_line_size();
 635                        unsigned long almask = ~(lsize - 1);
 636
 637                        /*
  638                         * There is no clearly documented alignment requirement
  639                         * for the cache instruction on MIPS processors, and
  640                         * some processors, among them the QED RM5200 and
  641                         * RM7000, will throw an address error for cache hit
  642                         * ops with insufficient alignment.  This is solved by
  643                         * aligning the address to the cache line size.
 644                         */
 645                        cache_op(Hit_Writeback_Inv_SD, addr & almask);
 646                        cache_op(Hit_Writeback_Inv_SD,
 647                                 (addr + size - 1) & almask);
 648                        blast_inv_scache_range(addr, addr + size);
 649                }
 650                return;
 651        }
 652
 653        if (cpu_has_safe_index_cacheops && size >= dcache_size) {
 654                r4k_blast_dcache();
 655        } else {
 656                unsigned long lsize = cpu_dcache_line_size();
 657                unsigned long almask = ~(lsize - 1);
 658
 659                R4600_HIT_CACHEOP_WAR_IMPL;
 660                cache_op(Hit_Writeback_Inv_D, addr & almask);
 661                cache_op(Hit_Writeback_Inv_D, (addr + size - 1)  & almask);
 662                blast_inv_dcache_range(addr, addr + size);
 663        }
 664
 665        bc_inv(addr, size);
 666}
 667#endif /* CONFIG_DMA_NONCOHERENT */
 668
 669/*
  670 * While we're protected against bad userland addresses, we don't care
 671 * very much about what happens in that case.  Usually a segmentation
 672 * fault will dump the process later on anyway ...
 673 */
 674static void local_r4k_flush_cache_sigtramp(void * arg)
 675{
 676        unsigned long ic_lsize = cpu_icache_line_size();
 677        unsigned long dc_lsize = cpu_dcache_line_size();
 678        unsigned long sc_lsize = cpu_scache_line_size();
 679        unsigned long addr = (unsigned long) arg;
 680
 681        R4600_HIT_CACHEOP_WAR_IMPL;
 682        if (dc_lsize)
 683                protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
 684        if (!cpu_icache_snoops_remote_store && scache_size)
 685                protected_writeback_scache_line(addr & ~(sc_lsize - 1));
 686        if (ic_lsize)
 687                protected_flush_icache_line(addr & ~(ic_lsize - 1));
 688        if (MIPS4K_ICACHE_REFILL_WAR) {
 689                __asm__ __volatile__ (
 690                        ".set push\n\t"
 691                        ".set noat\n\t"
 692                        ".set mips3\n\t"
 693#ifdef CONFIG_32BIT
 694                        "la     $at,1f\n\t"
 695#endif
 696#ifdef CONFIG_64BIT
 697                        "dla    $at,1f\n\t"
 698#endif
 699                        "cache  %0,($at)\n\t"
 700                        "nop; nop; nop\n"
 701                        "1:\n\t"
 702                        ".set pop"
 703                        :
 704                        : "i" (Hit_Invalidate_I));
 705        }
 706        if (MIPS_CACHE_SYNC_WAR)
 707                __asm__ __volatile__ ("sync");
 708}
 709
 710static void r4k_flush_cache_sigtramp(unsigned long addr)
 711{
 712        r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
 713}
 714
 715static void r4k_flush_icache_all(void)
 716{
 717        if (cpu_has_vtag_icache)
 718                r4k_blast_icache();
 719}
 720
 721static inline void rm7k_erratum31(void)
 722{
 723        const unsigned long ic_lsize = 32;
 724        unsigned long addr;
 725
 726        /* RM7000 erratum #31. The icache is screwed at startup. */
 727        write_c0_taglo(0);
 728        write_c0_taghi(0);
 729
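             /*
              * Clear the tags in all four ways of the I-cache (the ways are
              * 0x1000 apart in index space), refill every line, then
              * invalidate the lines again.
              */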
 730        for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
 731                __asm__ __volatile__ (
 732                        ".set push\n\t"
 733                        ".set noreorder\n\t"
 734                        ".set mips3\n\t"
 735                        "cache\t%1, 0(%0)\n\t"
 736                        "cache\t%1, 0x1000(%0)\n\t"
 737                        "cache\t%1, 0x2000(%0)\n\t"
 738                        "cache\t%1, 0x3000(%0)\n\t"
 739                        "cache\t%2, 0(%0)\n\t"
 740                        "cache\t%2, 0x1000(%0)\n\t"
 741                        "cache\t%2, 0x2000(%0)\n\t"
 742                        "cache\t%2, 0x3000(%0)\n\t"
 743                        "cache\t%1, 0(%0)\n\t"
 744                        "cache\t%1, 0x1000(%0)\n\t"
 745                        "cache\t%1, 0x2000(%0)\n\t"
 746                        "cache\t%1, 0x3000(%0)\n\t"
 747                        ".set pop\n"
 748                        :
 749                        : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
 750        }
 751}
 752
 753static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
 754        "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
 755};
 756
 757static void __cpuinit probe_pcache(void)
 758{
 759        struct cpuinfo_mips *c = &current_cpu_data;
 760        unsigned int config = read_c0_config();
 761        unsigned int prid = read_c0_prid();
 762        unsigned long config1;
 763        unsigned int lsize;
 764
 765        switch (c->cputype) {
 766        case CPU_R4600:                 /* QED style two way caches? */
 767        case CPU_R4700:
 768        case CPU_R5000:
 769        case CPU_NEVADA:
 770                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 771                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 772                c->icache.ways = 2;
 773                c->icache.waybit = __ffs(icache_size/2);
 774
 775                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
 776                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 777                c->dcache.ways = 2;
 778                c->dcache.waybit= __ffs(dcache_size/2);
 779
 780                c->options |= MIPS_CPU_CACHE_CDEX_P;
 781                break;
 782
 783        case CPU_R5432:
 784        case CPU_R5500:
 785                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 786                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 787                c->icache.ways = 2;
 788                c->icache.waybit= 0;
 789
 790                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
 791                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 792                c->dcache.ways = 2;
 793                c->dcache.waybit = 0;
 794
 795                c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
 796                break;
 797
 798        case CPU_TX49XX:
 799                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 800                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 801                c->icache.ways = 4;
 802                c->icache.waybit= 0;
 803
 804                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
 805                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 806                c->dcache.ways = 4;
 807                c->dcache.waybit = 0;
 808
 809                c->options |= MIPS_CPU_CACHE_CDEX_P;
 810                c->options |= MIPS_CPU_PREFETCH;
 811                break;
 812
 813        case CPU_R4000PC:
 814        case CPU_R4000SC:
 815        case CPU_R4000MC:
 816        case CPU_R4400PC:
 817        case CPU_R4400SC:
 818        case CPU_R4400MC:
 819        case CPU_R4300:
 820                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 821                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 822                c->icache.ways = 1;
 823                c->icache.waybit = 0;   /* doesn't matter */
 824
 825                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
 826                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 827                c->dcache.ways = 1;
 828                c->dcache.waybit = 0;   /* does not matter */
 829
 830                c->options |= MIPS_CPU_CACHE_CDEX_P;
 831                break;
 832
 833        case CPU_R10000:
 834        case CPU_R12000:
 835        case CPU_R14000:
 836                icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
 837                c->icache.linesz = 64;
 838                c->icache.ways = 2;
 839                c->icache.waybit = 0;
 840
 841                dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
 842                c->dcache.linesz = 32;
 843                c->dcache.ways = 2;
 844                c->dcache.waybit = 0;
 845
 846                c->options |= MIPS_CPU_PREFETCH;
 847                break;
 848
 849        case CPU_VR4133:
 850                write_c0_config(config & ~VR41_CONF_P4K);
 851        case CPU_VR4131:
 852                /* Workaround for cache instruction bug of VR4131 */
 853                if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
 854                    c->processor_id == 0x0c82U) {
 855                        config |= 0x00400000U;
 856                        if (c->processor_id == 0x0c80U)
 857                                config |= VR41_CONF_BP;
 858                        write_c0_config(config);
 859                } else
 860                        c->options |= MIPS_CPU_CACHE_CDEX_P;
 861
 862                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
 863                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 864                c->icache.ways = 2;
 865                c->icache.waybit = __ffs(icache_size/2);
 866
 867                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
 868                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 869                c->dcache.ways = 2;
 870                c->dcache.waybit = __ffs(dcache_size/2);
 871                break;
 872
 873        case CPU_VR41XX:
 874        case CPU_VR4111:
 875        case CPU_VR4121:
 876        case CPU_VR4122:
 877        case CPU_VR4181:
 878        case CPU_VR4181A:
 879                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
 880                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 881                c->icache.ways = 1;
 882                c->icache.waybit = 0;   /* doesn't matter */
 883
 884                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
 885                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 886                c->dcache.ways = 1;
 887                c->dcache.waybit = 0;   /* does not matter */
 888
 889                c->options |= MIPS_CPU_CACHE_CDEX_P;
 890                break;
 891
 892        case CPU_RM7000:
 893                rm7k_erratum31();
 894
 895        case CPU_RM9000:
 896                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 897                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 898                c->icache.ways = 4;
 899                c->icache.waybit = __ffs(icache_size / c->icache.ways);
 900
 901                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
 902                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 903                c->dcache.ways = 4;
 904                c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
 905
 906#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
 907                c->options |= MIPS_CPU_CACHE_CDEX_P;
 908#endif
 909                c->options |= MIPS_CPU_PREFETCH;
 910                break;
 911
 912        case CPU_LOONGSON2:
 913                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 914                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 915                if (prid & 0x3)
 916                        c->icache.ways = 4;
 917                else
 918                        c->icache.ways = 2;
 919                c->icache.waybit = 0;
 920
 921                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
 922                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 923                if (prid & 0x3)
 924                        c->dcache.ways = 4;
 925                else
 926                        c->dcache.ways = 2;
 927                c->dcache.waybit = 0;
 928                break;
 929
 930        default:
 931                if (!(config & MIPS_CONF_M))
 932                        panic("Don't know how to probe P-caches on this cpu.");
 933
 934                /*
  935                 * We seem to be a MIPS32 or MIPS64 CPU, so let's probe
  936                 * the I-cache ...
 937                 */
 938                config1 = read_c0_config1();
 939
 940                if ((lsize = ((config1 >> 19) & 7)))
 941                        c->icache.linesz = 2 << lsize;
 942                else
 943                        c->icache.linesz = lsize;
 944                c->icache.sets = 64 << ((config1 >> 22) & 7);
 945                c->icache.ways = 1 + ((config1 >> 16) & 7);
 946
 947                icache_size = c->icache.sets *
 948                              c->icache.ways *
 949                              c->icache.linesz;
 950                c->icache.waybit = __ffs(icache_size/c->icache.ways);
 951
 952                if (config & 0x8)               /* VI bit */
 953                        c->icache.flags |= MIPS_CACHE_VTAG;
 954
 955                /*
 956                 * Now probe the MIPS32 / MIPS64 data cache.
 957                 */
 958                c->dcache.flags = 0;
 959
 960                if ((lsize = ((config1 >> 10) & 7)))
 961                        c->dcache.linesz = 2 << lsize;
 962                else
 963                        c->dcache.linesz= lsize;
 964                c->dcache.sets = 64 << ((config1 >> 13) & 7);
 965                c->dcache.ways = 1 + ((config1 >> 7) & 7);
 966
 967                dcache_size = c->dcache.sets *
 968                              c->dcache.ways *
 969                              c->dcache.linesz;
 970                c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
 971
 972                c->options |= MIPS_CPU_PREFETCH;
 973                break;
 974        }
 975
 976        /*
 977         * Processor configuration sanity check for the R4000SC erratum
 978         * #5.  With page sizes larger than 32kB there is no possibility
 979         * to get a VCE exception anymore so we don't care about this
 980         * misconfiguration.  The case is rather theoretical anyway;
 981         * presumably no vendor is shipping his hardware in the "bad"
 982         * configuration.
 983         */
 984        if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
 985            !(config & CONF_SC) && c->icache.linesz != 16 &&
 986            PAGE_SIZE <= 0x8000)
 987                panic("Improper R4000SC processor configuration detected");
 988
 989        /* compute a couple of other cache variables */
 990        c->icache.waysize = icache_size / c->icache.ways;
 991        c->dcache.waysize = dcache_size / c->dcache.ways;
 992
 993        c->icache.sets = c->icache.linesz ?
 994                icache_size / (c->icache.linesz * c->icache.ways) : 0;
 995        c->dcache.sets = c->dcache.linesz ?
 996                dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
 997
 998        /*
 999         * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
 1000         * 2-way virtually indexed, so normally they would suffer from
 1001         * aliases, but magic in the hardware deals with that for us so we
 1002         * don't need to take care ourselves.
1003         */
1004        switch (c->cputype) {
1005        case CPU_20KC:
1006        case CPU_25KF:
1007        case CPU_SB1:
1008        case CPU_SB1A:
1009                c->dcache.flags |= MIPS_CACHE_PINDEX;
1010                break;
1011
1012        case CPU_R10000:
1013        case CPU_R12000:
1014        case CPU_R14000:
1015                break;
1016
1017        case CPU_24K:
1018        case CPU_34K:
1019        case CPU_74K:
1020        case CPU_1004K:
1021                if ((read_c0_config7() & (1 << 16))) {
1022                        /* effectively physically indexed dcache,
1023                           thus no virtual aliases. */
1024                        c->dcache.flags |= MIPS_CACHE_PINDEX;
1025                        break;
1026                }
1027        default:
1028                if (c->dcache.waysize > PAGE_SIZE)
1029                        c->dcache.flags |= MIPS_CACHE_ALIASES;
1030        }
1031
1032        switch (c->cputype) {
1033        case CPU_20KC:
1034                /*
 1035                 * Some older 20Kc chips don't have the 'VI' bit in
1036                 * the config register.
1037                 */
1038                c->icache.flags |= MIPS_CACHE_VTAG;
1039                break;
1040
1041        case CPU_ALCHEMY:
1042                c->icache.flags |= MIPS_CACHE_IC_F_DC;
1043                break;
1044        }
1045
1046#ifdef  CONFIG_CPU_LOONGSON2
1047        /*
 1048         * LOONGSON2 has a 4-way icache, but when using indexed cache ops,
 1049         * one op will act on all 4 ways.
1050         */
1051        c->icache.ways = 1;
1052#endif
1053
1054        printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
1055               icache_size >> 10,
1056               c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
1057               way_string[c->icache.ways], c->icache.linesz);
1058
1059        printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
1060               dcache_size >> 10, way_string[c->dcache.ways],
1061               (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
1062               (c->dcache.flags & MIPS_CACHE_ALIASES) ?
1063                        "cache aliases" : "no aliases",
1064               c->dcache.linesz);
1065}
1066
1067/*
1068 * If you even _breathe_ on this function, look at the gcc output and make sure
1069 * it does not pop things on and off the stack for the cache sizing loop that
1070 * executes in KSEG1 space or else you will crash and burn badly.  You have
1071 * been warned.
1072 */
1073static int __cpuinit probe_scache(void)
1074{
1075        unsigned long flags, addr, begin, end, pow2;
1076        unsigned int config = read_c0_config();
1077        struct cpuinfo_mips *c = &current_cpu_data;
1078        int tmp;
1079
1080        if (config & CONF_SC)
1081                return 0;
1082
1083        begin = (unsigned long) &_stext;
1084        begin &= ~((4 * 1024 * 1024) - 1);
1085        end = begin + (4 * 1024 * 1024);
1086
1087        /*
1088         * This is such a bitch, you'd think they would make it easy to do
1089         * this.  Away you daemons of stupidity!
1090         */
1091        local_irq_save(flags);
1092
1093        /* Fill each size-multiple cache line with a valid tag. */
1094        pow2 = (64 * 1024);
1095        for (addr = begin; addr < end; addr = (begin + pow2)) {
1096                unsigned long *p = (unsigned long *) addr;
1097                __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
1098                pow2 <<= 1;
1099        }
1100
1101        /* Load first line with zero (therefore invalid) tag. */
1102        write_c0_taglo(0);
1103        write_c0_taghi(0);
1104        __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
1105        cache_op(Index_Store_Tag_I, begin);
1106        cache_op(Index_Store_Tag_D, begin);
1107        cache_op(Index_Store_Tag_SD, begin);
1108
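             /*
              * The S-cache probed here is direct mapped, so the first
              * power-of-two offset at which the zero tag stored above becomes
              * visible again is exactly the cache size.
              */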
1109        /* Now search for the wrap around point. */
1110        pow2 = (128 * 1024);
1111        tmp = 0;
1112        for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
1113                cache_op(Index_Load_Tag_SD, addr);
1114                __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
1115                if (!read_c0_taglo())
1116                        break;
1117                pow2 <<= 1;
1118        }
1119        local_irq_restore(flags);
1120        addr -= begin;
1121
1122        scache_size = addr;
1123        c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
1124        c->scache.ways = 1;
 1125        c->scache.waybit = 0;           /* does not matter */
1126
1127        return 1;
1128}
1129
1130#if defined(CONFIG_CPU_LOONGSON2)
1131static void __init loongson2_sc_init(void)
1132{
1133        struct cpuinfo_mips *c = &current_cpu_data;
1134
1135        scache_size = 512*1024;
1136        c->scache.linesz = 32;
1137        c->scache.ways = 4;
1138        c->scache.waybit = 0;
1139        c->scache.waysize = scache_size / (c->scache.ways);
1140        c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
1141        pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1142               scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1143
1144        c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1145}
1146#endif
1147
1148extern int r5k_sc_init(void);
1149extern int rm7k_sc_init(void);
1150extern int mips_sc_init(void);
1151
1152static void __cpuinit setup_scache(void)
1153{
1154        struct cpuinfo_mips *c = &current_cpu_data;
1155        unsigned int config = read_c0_config();
1156        int sc_present = 0;
1157
1158        /*
1159         * Do the probing thing on R4000SC and R4400SC processors.  Other
 1160         * processors don't have an S-cache that would be relevant to the
1161         * Linux memory management.
1162         */
1163        switch (c->cputype) {
1164        case CPU_R4000SC:
1165        case CPU_R4000MC:
1166        case CPU_R4400SC:
1167        case CPU_R4400MC:
1168                sc_present = run_uncached(probe_scache);
1169                if (sc_present)
1170                        c->options |= MIPS_CPU_CACHE_CDEX_S;
1171                break;
1172
1173        case CPU_R10000:
1174        case CPU_R12000:
1175        case CPU_R14000:
1176                scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
1177                c->scache.linesz = 64 << ((config >> 13) & 1);
1178                c->scache.ways = 2;
1179                c->scache.waybit= 0;
1180                sc_present = 1;
1181                break;
1182
1183        case CPU_R5000:
1184        case CPU_NEVADA:
1185#ifdef CONFIG_R5000_CPU_SCACHE
1186                r5k_sc_init();
1187#endif
1188                return;
1189
1190        case CPU_RM7000:
1191        case CPU_RM9000:
1192#ifdef CONFIG_RM7000_CPU_SCACHE
1193                rm7k_sc_init();
1194#endif
1195                return;
1196
1197#if defined(CONFIG_CPU_LOONGSON2)
1198        case CPU_LOONGSON2:
1199                loongson2_sc_init();
1200                return;
1201#endif
1202
1203        default:
1204                if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
1205                    c->isa_level == MIPS_CPU_ISA_M32R2 ||
1206                    c->isa_level == MIPS_CPU_ISA_M64R1 ||
1207                    c->isa_level == MIPS_CPU_ISA_M64R2) {
1208#ifdef CONFIG_MIPS_CPU_SCACHE
1209                        if (mips_sc_init ()) {
1210                                scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
1211                                printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
1212                                       scache_size >> 10,
1213                                       way_string[c->scache.ways], c->scache.linesz);
1214                        }
1215#else
1216                        if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
1217                                panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
1218#endif
1219                        return;
1220                }
1221                sc_present = 0;
1222        }
1223
1224        if (!sc_present)
1225                return;
1226
1227        /* compute a couple of other cache variables */
1228        c->scache.waysize = scache_size / c->scache.ways;
1229
1230        c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
1231
1232        printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1233               scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1234
1235        c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1236}
1237
1238void au1x00_fixup_config_od(void)
1239{
1240        /*
1241         * c0_config.od (bit 19) was write only (and read as 0)
1242         * on the early revisions of Alchemy SOCs.  It disables the bus
1243         * transaction overlapping and needs to be set to fix various errata.
1244         */
1245        switch (read_c0_prid()) {
1246        case 0x00030100: /* Au1000 DA */
1247        case 0x00030201: /* Au1000 HA */
1248        case 0x00030202: /* Au1000 HB */
1249        case 0x01030200: /* Au1500 AB */
1250        /*
 1251         * The Au1100 errata are actually silent about this bit, so we set
 1252         * it just in case for those revisions that required it to be set
 1253         * according to the (now gone) cpu table.
1254         */
1255        case 0x02030200: /* Au1100 AB */
1256        case 0x02030201: /* Au1100 BA */
1257        case 0x02030202: /* Au1100 BC */
1258                set_c0_config(1 << 19);
1259                break;
1260        }
1261}
1262
1263/* CP0 hazard avoidance. */
1264#define NXP_BARRIER()                                                   \
1265         __asm__ __volatile__(                                          \
1266        ".set noreorder\n\t"                                            \
1267        "nop; nop; nop; nop; nop; nop;\n\t"                             \
1268        ".set reorder\n\t")
1269
1270static void nxp_pr4450_fixup_config(void)
1271{
1272        unsigned long config0;
1273
1274        config0 = read_c0_config();
1275
1276        /* clear all three cache coherency fields */
1277        config0 &= ~(0x7 | (7 << 25) | (7 << 28));
1278        config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
1279                    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
1280                    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
1281        write_c0_config(config0);
1282        NXP_BARRIER();
1283}
1284
1285static int __cpuinitdata cca = -1;
1286
1287static int __init cca_setup(char *str)
1288{
1289        get_option(&str, &cca);
1290
1291        return 1;
1292}
1293
1294__setup("cca=", cca_setup);
1295
1296static void __cpuinit coherency_setup(void)
1297{
1298        if (cca < 0 || cca > 7)
1299                cca = read_c0_config() & CONF_CM_CMASK;
1300        _page_cachable_default = cca << _CACHE_SHIFT;
1301
1302        pr_debug("Using cache attribute %d\n", cca);
1303        change_c0_config(CONF_CM_CMASK, cca);
1304
1305        /*
1306         * c0_status.cu=0 specifies that updates by the sc instruction use
1307         * the coherency mode specified by the TLB; 1 means cachable
1308         * coherent update on write will be used.  Not all processors have
 1309         * this bit; some wire it to zero, others like Toshiba had the
1310         * silly idea of putting something else there ...
1311         */
1312        switch (current_cpu_type()) {
1313        case CPU_R4000PC:
1314        case CPU_R4000SC:
1315        case CPU_R4000MC:
1316        case CPU_R4400PC:
1317        case CPU_R4400SC:
1318        case CPU_R4400MC:
1319                clear_c0_config(CONF_CU);
1320                break;
1321        /*
1322         * We need to catch the early Alchemy SOCs with
 1323         * the write-only c0_config.od bit and set it back to one on:
1324         * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
1325         */
1326        case CPU_ALCHEMY:
1327                au1x00_fixup_config_od();
1328                break;
1329
1330        case PRID_IMP_PR4450:
1331                nxp_pr4450_fixup_config();
1332                break;
1333        }
1334}
1335
1336#if defined(CONFIG_DMA_NONCOHERENT)
1337
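     /*
      * "coherentio" on the kernel command line declares DMA to be cache
      * coherent; r4k_cache_init() then turns the DMA cache maintenance hooks
      * into no-ops.
      */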
1338static int __cpuinitdata coherentio;
1339
1340static int __init setcoherentio(char *str)
1341{
1342        coherentio = 1;
1343
1344        return 1;
1345}
1346
1347__setup("coherentio", setcoherentio);
1348#endif
1349
1350void __cpuinit r4k_cache_init(void)
1351{
1352        extern void build_clear_page(void);
1353        extern void build_copy_page(void);
1354        extern char __weak except_vec2_generic;
1355        extern char __weak except_vec2_sb1;
1356        struct cpuinfo_mips *c = &current_cpu_data;
1357
1358        switch (c->cputype) {
1359        case CPU_SB1:
1360        case CPU_SB1A:
1361                set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
1362                break;
1363
1364        default:
1365                set_uncached_handler(0x100, &except_vec2_generic, 0x80);
1366                break;
1367        }
1368
1369        probe_pcache();
1370        setup_scache();
1371
1372        r4k_blast_dcache_page_setup();
1373        r4k_blast_dcache_page_indexed_setup();
1374        r4k_blast_dcache_setup();
1375        r4k_blast_icache_page_setup();
1376        r4k_blast_icache_page_indexed_setup();
1377        r4k_blast_icache_setup();
1378        r4k_blast_scache_page_setup();
1379        r4k_blast_scache_page_indexed_setup();
1380        r4k_blast_scache_setup();
1381
1382        /*
1383         * Some MIPS32 and MIPS64 processors have physically indexed caches.
1384         * This code supports virtually indexed processors and will be
1385         * unnecessarily inefficient on physically indexed processors.
1386         */
1387        if (c->dcache.linesz)
1388                shm_align_mask = max_t( unsigned long,
1389                                        c->dcache.sets * c->dcache.linesz - 1,
1390                                        PAGE_SIZE - 1);
1391        else
1392                shm_align_mask = PAGE_SIZE-1;
1393
1394        __flush_cache_vmap      = r4k__flush_cache_vmap;
1395        __flush_cache_vunmap    = r4k__flush_cache_vunmap;
1396
1397        flush_cache_all         = cache_noop;
1398        __flush_cache_all       = r4k___flush_cache_all;
1399        flush_cache_mm          = r4k_flush_cache_mm;
1400        flush_cache_page        = r4k_flush_cache_page;
1401        flush_cache_range       = r4k_flush_cache_range;
1402
1403        flush_cache_sigtramp    = r4k_flush_cache_sigtramp;
1404        flush_icache_all        = r4k_flush_icache_all;
1405        local_flush_data_cache_page     = local_r4k_flush_data_cache_page;
1406        flush_data_cache_page   = r4k_flush_data_cache_page;
1407        flush_icache_range      = r4k_flush_icache_range;
1408        local_flush_icache_range        = local_r4k_flush_icache_range;
1409
1410#if defined(CONFIG_DMA_NONCOHERENT)
1411        if (coherentio) {
1412                _dma_cache_wback_inv    = (void *)cache_noop;
1413                _dma_cache_wback        = (void *)cache_noop;
1414                _dma_cache_inv          = (void *)cache_noop;
1415        } else {
1416                _dma_cache_wback_inv    = r4k_dma_cache_wback_inv;
1417                _dma_cache_wback        = r4k_dma_cache_wback_inv;
1418                _dma_cache_inv          = r4k_dma_cache_inv;
1419        }
1420#endif
1421
1422        build_clear_page();
1423        build_copy_page();
1424#if !defined(CONFIG_MIPS_CMP)
1425        local_r4k___flush_cache_all(NULL);
1426#endif
1427        coherency_setup();
1428}
1429