linux/arch/mips/mm/c-r4k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>
#include <asm/mips-cps.h>

/*
 * Bits describing what cache ops an SMP callback function may perform.
 *
 * R4K_HIT   -  Virtual user or kernel address based cache operations. The
 *              active_mm must be checked before using user addresses, falling
 *              back to kmap.
 * R4K_INDEX -  Index based cache operations.
 */

#define R4K_HIT         BIT(0)
#define R4K_INDEX       BIT(1)

/**
 * r4k_op_needs_ipi() - Decide if a cache op needs to be done on every core.
 * @type:       Type of cache operations (R4K_HIT or R4K_INDEX).
 *
 * Decides whether a cache op needs to be performed on every core in the system.
 * This may change depending on the @type of cache operation, as well as the set
 * of online CPUs, so preemption should be disabled by the caller to prevent CPU
 * hotplug from changing the result.
 *
 * Returns:     true if the cache operation @type should be done on every core
 *              in the system.
 *              false if the cache operation @type is globalized and only needs
 *              to be performed on a single CPU.
 */
static inline bool r4k_op_needs_ipi(unsigned int type)
{
        /* The MIPS Coherence Manager (CM) globalizes address-based cache ops */
        if (type == R4K_HIT && mips_cm_present())
                return false;

        /*
         * Hardware doesn't globalize the required cache ops, so SMP calls may
         * be needed, but only if there are foreign CPUs (non-siblings with
         * separate caches).
         */
        /* cpu_foreign_map[] undeclared when !CONFIG_SMP */
#ifdef CONFIG_SMP
        return !cpumask_empty(&cpu_foreign_map[0]);
#else
        return false;
#endif
}
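
/*
 * Background: cpu_foreign_map[cpu] holds one VPE from each core other
 * than cpu's own (see calculate_cpu_foreign_map() in smp.c), so the
 * emptiness test above asks "is there any other core with its own
 * primary caches?".  On a single core, however many VPEs it has, the
 * map is empty and no IPIs are ever needed.
 */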

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o doesn't disable interrupts on the local CPU
 */
static inline void r4k_on_each_cpu(unsigned int type,
                                   void (*func)(void *info), void *info)
{
        preempt_disable();
        if (r4k_op_needs_ipi(type))
                smp_call_function_many(&cpu_foreign_map[smp_processor_id()],
                                       func, info, 1);
        func(info);
        preempt_enable();
}
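
/*
 * Typical usage, mirroring the callers below such as
 * r4k___flush_cache_all(): a flush operation wraps its CPU-local
 * worker, and r4k_on_each_cpu() decides whether that worker must also
 * run on foreign cores:
 *
 *      static void local_flush(void *info)
 *      {
 *              ... operate on this CPU's caches ...
 *      }
 *
 *      static void flush(void)
 *      {
 *              r4k_on_each_cpu(R4K_INDEX, local_flush, NULL);
 *      }
 */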

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long vcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
        .bc_enable = (void *)cache_noop,
        .bc_disable = (void *)cache_noop,
        .bc_wback_inv = (void *)cache_noop,
        .bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL                                      \
do {                                                                    \
        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())            \
                *(volatile unsigned long *)CKSEG1;                      \
        if (R4600_V1_HIT_CACHEOP_WAR)                                   \
                __asm__ __volatile__("nop;nop;nop;nop");                \
} while (0)
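
/*
 * The workaround above: on R4600 v2.x an uncached load is issued before
 * a Hit cacheop; per the erratum the Hit ops only operate correctly
 * when the internal cache refill buffer is empty, which the uncached
 * access forces.  On v1.x a few nops provide pipeline spacing instead
 * (see asm/war.h for the gory details).
 */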

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
        R4600_HIT_CACHEOP_WAR_IMPL;
        blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
        blast_dcache64_page(addr);
}

static inline void r4k_blast_dcache_page_dc128(unsigned long addr)
{
        blast_dcache128_page(addr);
}

static void r4k_blast_dcache_page_setup(void)
{
        unsigned long  dc_lsize = cpu_dcache_line_size();

        switch (dc_lsize) {
        case 0:
                r4k_blast_dcache_page = (void *)cache_noop;
                break;
        case 16:
                r4k_blast_dcache_page = blast_dcache16_page;
                break;
        case 32:
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
                break;
        case 64:
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
                break;
        case 128:
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc128;
                break;
        default:
                break;
        }
}
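
/*
 * All of the _setup() routines that follow repeat this pattern: the
 * cache line size is probed once at boot and a matching blast_*()
 * implementation is bound to a function pointer, so each later flush
 * costs only an indirect call.  A line size of 0 means the cache is
 * absent and the operation collapses to cache_noop().
 */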

#ifndef CONFIG_EVA
#define r4k_blast_dcache_user_page  r4k_blast_dcache_page
#else

static void (*r4k_blast_dcache_user_page)(unsigned long addr);

static void r4k_blast_dcache_user_page_setup(void)
{
        unsigned long  dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache_user_page = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache_user_page = blast_dcache16_user_page;
        else if (dc_lsize == 32)
                r4k_blast_dcache_user_page = blast_dcache32_user_page;
        else if (dc_lsize == 64)
                r4k_blast_dcache_user_page = blast_dcache64_user_page;
}

#endif

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void r4k_blast_dcache_page_indexed_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache_page_indexed = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
        else if (dc_lsize == 64)
                r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
        else if (dc_lsize == 128)
                r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed;
}

void (* r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);

static void r4k_blast_dcache_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache = blast_dcache16;
        else if (dc_lsize == 32)
                r4k_blast_dcache = blast_dcache32;
        else if (dc_lsize == 64)
                r4k_blast_dcache = blast_dcache64;
        else if (dc_lsize == 128)
                r4k_blast_dcache = blast_dcache128;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
        __asm__ __volatile__( \
                "b\t1f\n\t" \
                ".align\t" #order "\n\t" \
                "1:\n\t" \
                )
#define CACHE32_UNROLL32_ALIGN  JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
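
/*
 * The arithmetic: cache32_unroll32() issues 32 cache ops on 32-byte
 * lines, i.e. exactly 1kB, so JUMP_TO_ALIGN(10) keeps the unrolled
 * block within one 1kB chunk.  The TX49 functions below align to 2kB
 * first (an even 1kB chunk) and blast the odd chunks, then re-align to
 * 1kB (landing in an odd chunk) and blast the even ones, so the code
 * never invalidates the icache chunk it is executing from.
 */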

static inline void blast_r4600_v1_icache32(void)
{
        unsigned long flags;

        local_irq_save(flags);
        blast_icache32();
        local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
        unsigned long start = INDEX_BASE;
        unsigned long end = start + current_cpu_data.icache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
        unsigned long flags;

        local_irq_save(flags);
        blast_icache32_page_indexed(page);
        local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
        unsigned long indexmask = current_cpu_data.icache.waysize - 1;
        unsigned long start = INDEX_BASE + (page & indexmask);
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static void r4k_blast_icache_page_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_page = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_page = blast_icache16_page;
        else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
                r4k_blast_icache_page = loongson2_blast_icache32_page;
        else if (ic_lsize == 32)
                r4k_blast_icache_page = blast_icache32_page;
        else if (ic_lsize == 64)
                r4k_blast_icache_page = blast_icache64_page;
        else if (ic_lsize == 128)
                r4k_blast_icache_page = blast_icache128_page;
}

#ifndef CONFIG_EVA
#define r4k_blast_icache_user_page  r4k_blast_icache_page
#else

static void (*r4k_blast_icache_user_page)(unsigned long addr);

static void r4k_blast_icache_user_page_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_user_page = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_user_page = blast_icache16_user_page;
        else if (ic_lsize == 32)
                r4k_blast_icache_user_page = blast_icache32_user_page;
        else if (ic_lsize == 64)
                r4k_blast_icache_user_page = blast_icache64_user_page;
}

#endif

static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void r4k_blast_icache_page_indexed_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_page_indexed = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
                        r4k_blast_icache_page_indexed =
                                blast_icache32_r4600_v1_page_indexed;
                else if (TX49XX_ICACHE_INDEX_INV_WAR)
                        r4k_blast_icache_page_indexed =
                                tx49_blast_icache32_page_indexed;
                else if (current_cpu_type() == CPU_LOONGSON2)
                        r4k_blast_icache_page_indexed =
                                loongson2_blast_icache32_page_indexed;
                else
                        r4k_blast_icache_page_indexed =
                                blast_icache32_page_indexed;
        } else if (ic_lsize == 64)
                r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

void (* r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);

static void r4k_blast_icache_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache = blast_icache16;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
                        r4k_blast_icache = blast_r4600_v1_icache32;
                else if (TX49XX_ICACHE_INDEX_INV_WAR)
                        r4k_blast_icache = tx49_blast_icache32;
                else if (current_cpu_type() == CPU_LOONGSON2)
                        r4k_blast_icache = loongson2_blast_icache32;
                else
                        r4k_blast_icache = blast_icache32;
        } else if (ic_lsize == 64)
                r4k_blast_icache = blast_icache64;
        else if (ic_lsize == 128)
                r4k_blast_icache = blast_icache128;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

static void r4k_blast_scache_page_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache_page = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page = blast_scache16_page;
        else if (sc_lsize == 32)
                r4k_blast_scache_page = blast_scache32_page;
        else if (sc_lsize == 64)
                r4k_blast_scache_page = blast_scache64_page;
        else if (sc_lsize == 128)
                r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void r4k_blast_scache_page_indexed_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache_page_indexed = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
        else if (sc_lsize == 32)
                r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
        else if (sc_lsize == 64)
                r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
        else if (sc_lsize == 128)
                r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static void r4k_blast_scache_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache = blast_scache16;
        else if (sc_lsize == 32)
                r4k_blast_scache = blast_scache32;
        else if (sc_lsize == 64)
                r4k_blast_scache = blast_scache64;
        else if (sc_lsize == 128)
                r4k_blast_scache = blast_scache128;
}

static void (*r4k_blast_scache_node)(long node);

static void r4k_blast_scache_node_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (current_cpu_type() != CPU_LOONGSON3)
                r4k_blast_scache_node = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_node = blast_scache16_node;
        else if (sc_lsize == 32)
                r4k_blast_scache_node = blast_scache32_node;
        else if (sc_lsize == 64)
                r4k_blast_scache_node = blast_scache64_node;
        else if (sc_lsize == 128)
                r4k_blast_scache_node = blast_scache128_node;
}

static inline void local_r4k___flush_cache_all(void * args)
{
        switch (current_cpu_type()) {
        case CPU_LOONGSON2:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
        case CPU_R4400MC:
        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
        case CPU_R16000:
                /*
                 * These caches are inclusive caches, that is, if something
                 * is not cached in the S-cache, we know it also won't be
                 * in one of the primary caches.
                 */
                r4k_blast_scache();
                break;

        case CPU_LOONGSON3:
                /* Use get_ebase_cpunum() for both NUMA=y/n */
                r4k_blast_scache_node(get_ebase_cpunum() >> 2);
                break;

        case CPU_BMIPS5000:
                r4k_blast_scache();
                __sync();
                break;

        default:
                r4k_blast_dcache();
                r4k_blast_icache();
                break;
        }
}

static void r4k___flush_cache_all(void)
{
        r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL);
}

/**
 * has_valid_asid() - Determine if an mm already has an ASID.
 * @mm:         Memory map.
 * @type:       R4K_HIT or R4K_INDEX, type of cache op.
 *
 * Determines whether @mm already has an ASID on any of the CPUs which cache ops
 * of type @type within an r4k_on_each_cpu() call will affect. If
 * r4k_on_each_cpu() does an SMP call to a single VPE in each core, then the
 * scope of the operation is confined to sibling CPUs, otherwise all online CPUs
 * will need to be checked.
 *
 * Must be called in non-preemptive context.
 *
 * Returns:     1 if the CPUs affected by @type cache ops have an ASID for @mm.
 *              0 otherwise.
 */
static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type)
{
        unsigned int i;
        const cpumask_t *mask = cpu_present_mask;

        if (cpu_has_mmid)
                return cpu_context(0, mm) != 0;

        /* cpu_sibling_map[] undeclared when !CONFIG_SMP */
#ifdef CONFIG_SMP
        /*
         * If r4k_on_each_cpu does SMP calls, it does them to a single VPE in
         * each foreign core, so we only need to worry about siblings.
         * Otherwise we need to worry about all present CPUs.
         */
        if (r4k_op_needs_ipi(type))
                mask = &cpu_sibling_map[smp_processor_id()];
#endif
        for_each_cpu(i, mask)
                if (cpu_context(i, mm))
                        return 1;
        return 0;
}

static void r4k__flush_cache_vmap(void)
{
        r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
        r4k_blast_dcache();
}

/*
 * Note: flush_tlb_range() assumes flush_cache_range() sufficiently flushes
 * whole caches when vma is executable.
 */
static inline void local_r4k_flush_cache_range(void * args)
{
        struct vm_area_struct *vma = args;
        int exec = vma->vm_flags & VM_EXEC;

        if (!has_valid_asid(vma->vm_mm, R4K_INDEX))
                return;

        /*
         * If dcache can alias, we must blast it since mapping is changing.
         * If executable, we must ensure any dirty lines are written back far
         * enough to be visible to icache.
         */
        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
                r4k_blast_dcache();
        /* If executable, blast stale lines from icache */
        if (exec)
                r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end)
{
        int exec = vma->vm_flags & VM_EXEC;

        if (cpu_has_dc_aliases || exec)
                r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_range, vma);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
        struct mm_struct *mm = args;

        if (!has_valid_asid(mm, R4K_INDEX))
                return;

        /*
         * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
         * only flush the primary caches, while the R1x000 behaves sanely.
         * R4000SC and R4400SC indexed S-cache ops also invalidate the primary
         * caches, so we can bail out early.
         */
        if (current_cpu_type() == CPU_R4000SC ||
            current_cpu_type() == CPU_R4000MC ||
            current_cpu_type() == CPU_R4400SC ||
            current_cpu_type() == CPU_R4400MC) {
                r4k_blast_scache();
                return;
        }

        r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
        if (!cpu_has_dc_aliases)
                return;

        r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_mm, mm);
}

struct flush_cache_page_args {
        struct vm_area_struct *vma;
        unsigned long addr;
        unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
        struct flush_cache_page_args *fcp_args = args;
        struct vm_area_struct *vma = fcp_args->vma;
        unsigned long addr = fcp_args->addr;
        struct page *page = pfn_to_page(fcp_args->pfn);
        int exec = vma->vm_flags & VM_EXEC;
        struct mm_struct *mm = vma->vm_mm;
        int map_coherent = 0;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        void *vaddr;

        /*
         * If the mm owns no valid ASID yet, it cannot possibly have gotten
         * this page into the cache.
         */
        if (!has_valid_asid(mm, R4K_HIT))
                return;

        addr &= PAGE_MASK;
        pgdp = pgd_offset(mm, addr);
        pudp = pud_offset(pgdp, addr);
        pmdp = pmd_offset(pudp, addr);
        ptep = pte_offset(pmdp, addr);

        /*
         * If the page isn't marked valid, the page cannot possibly be
         * in the cache.
         */
        if (!(pte_present(*ptep)))
                return;

        if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
                vaddr = NULL;
        else {
                /*
                 * Use kmap_coherent or kmap_atomic to do flushes for
                 * another ASID than the current one.
                 */
                map_coherent = (cpu_has_dc_aliases &&
                                page_mapcount(page) &&
                                !Page_dcache_dirty(page));
                if (map_coherent)
                        vaddr = kmap_coherent(page, addr);
                else
                        vaddr = kmap_atomic(page);
                addr = (unsigned long)vaddr;
        }

        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
                vaddr ? r4k_blast_dcache_page(addr) :
                        r4k_blast_dcache_user_page(addr);
                if (exec && !cpu_icache_snoops_remote_store)
                        r4k_blast_scache_page(addr);
        }
        if (exec) {
                if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
                        drop_mmu_context(mm);
                } else
                        vaddr ? r4k_blast_icache_page(addr) :
                                r4k_blast_icache_user_page(addr);
        }

        if (vaddr) {
                if (map_coherent)
                        kunmap_coherent();
                else
                        kunmap_atomic(vaddr);
        }
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
        unsigned long addr, unsigned long pfn)
{
        struct flush_cache_page_args args;

        args.vma = vma;
        args.addr = addr;
        args.pfn = pfn;

        r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_page, &args);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
        r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
        if (in_atomic())
                local_r4k_flush_data_cache_page((void *)addr);
        else
                r4k_on_each_cpu(R4K_HIT, local_r4k_flush_data_cache_page,
                                (void *) addr);
}

struct flush_icache_range_args {
        unsigned long start;
        unsigned long end;
        unsigned int type;
        bool user;
};

static inline void __local_r4k_flush_icache_range(unsigned long start,
                                                  unsigned long end,
                                                  unsigned int type,
                                                  bool user)
{
        if (!cpu_has_ic_fills_f_dc) {
                if (type == R4K_INDEX ||
                    (type & R4K_INDEX && end - start >= dcache_size)) {
                        r4k_blast_dcache();
                } else {
                        R4600_HIT_CACHEOP_WAR_IMPL;
                        if (user)
                                protected_blast_dcache_range(start, end);
                        else
                                blast_dcache_range(start, end);
                }
        }

        if (type == R4K_INDEX ||
            (type & R4K_INDEX && end - start > icache_size))
                r4k_blast_icache();
        else {
                switch (boot_cpu_type()) {
                case CPU_LOONGSON2:
                        protected_loongson2_blast_icache_range(start, end);
                        break;

                default:
                        if (user)
                                protected_blast_icache_range(start, end);
                        else
                                blast_icache_range(start, end);
                        break;
                }
        }
}

static inline void local_r4k_flush_icache_range(unsigned long start,
                                                unsigned long end)
{
        __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, false);
}

static inline void local_r4k_flush_icache_user_range(unsigned long start,
                                                     unsigned long end)
{
        __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, true);
}

static inline void local_r4k_flush_icache_range_ipi(void *args)
{
        struct flush_icache_range_args *fir_args = args;
        unsigned long start = fir_args->start;
        unsigned long end = fir_args->end;
        unsigned int type = fir_args->type;
        bool user = fir_args->user;

        __local_r4k_flush_icache_range(start, end, type, user);
}

static void __r4k_flush_icache_range(unsigned long start, unsigned long end,
                                     bool user)
{
        struct flush_icache_range_args args;
        unsigned long size, cache_size;

        args.start = start;
        args.end = end;
        args.type = R4K_HIT | R4K_INDEX;
        args.user = user;

        /*
         * Indexed cache ops require an SMP call.
         * Consider if that can or should be avoided.
         */
        preempt_disable();
        if (r4k_op_needs_ipi(R4K_INDEX) && !r4k_op_needs_ipi(R4K_HIT)) {
                /*
                 * If address-based cache ops don't require an SMP call, then
                 * use them exclusively for small flushes.
                 */
                size = end - start;
                cache_size = icache_size;
                if (!cpu_has_ic_fills_f_dc) {
                        size *= 2;
                        cache_size += dcache_size;
                }
                if (size <= cache_size)
                        args.type &= ~R4K_INDEX;
        }
        r4k_on_each_cpu(args.type, local_r4k_flush_icache_range_ipi, &args);
        preempt_enable();
        instruction_hazard();
}
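
/*
 * Worked example of the trade-off above, with illustrative sizes: on a
 * CM system with a 32kB icache and 32kB dcache and no
 * cpu_has_ic_fills_f_dc, flushing a 24kB range gives size = 48kB <=
 * cache_size = 64kB, so R4K_INDEX is dropped and the globalized HIT
 * ops do the whole job without any IPIs.  The doubling of size
 * accounts for each address being operated on twice, once for the
 * dcache writeback and once for the icache invalidation.
 */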

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
        return __r4k_flush_icache_range(start, end, false);
}

static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
{
        return __r4k_flush_icache_range(start, end, true);
}

#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
        /* Catch bad driver code */
        if (WARN_ON(size == 0))
                return;

        preempt_disable();
        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size) {
                        if (current_cpu_type() != CPU_LOONGSON3)
                                r4k_blast_scache();
                        else
                                r4k_blast_scache_node(pa_to_nid(addr));
                } else {
                        blast_scache_range(addr, addr + size);
                }
                preempt_enable();
                __sync();
                return;
        }

        /*
         * Either there is no secondary cache, or the available caches don't
         * have the subset property, so we have to flush the primary caches
         * explicitly.
         * If an INDEX-type operation would need an IPI, we have to use the
         * HIT-type alternative, as IPIs cannot be used here because
         * interrupts may be disabled.
         */
        if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_dcache_range(addr, addr + size);
        }
        preempt_enable();

        bc_wback_inv(addr, size);
        __sync();
}
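
/*
 * This writeback+invalidate flavour serves the DMA_TO_DEVICE direction
 * (and, via r4k_cache_init(), the plain writeback hook as well): dirty
 * lines must reach memory before the device reads it, and invalidating
 * them keeps stale copies from being reused afterwards.
 */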

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
        /* Catch bad driver code */
        if (WARN_ON(size == 0))
                return;

        preempt_disable();
        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size) {
                        if (current_cpu_type() != CPU_LOONGSON3)
                                r4k_blast_scache();
                        else
                                r4k_blast_scache_node(pa_to_nid(addr));
                } else {
                        /*
                         * There is no clearly documented alignment requirement
                         * for the cache instruction on MIPS processors, and
                         * some processors, among them the RM5200 and RM7000
                         * QED processors, will throw an address error for cache
                         * hit ops with insufficient alignment.  Solved by
                         * aligning the address to the cache line size.
                         */
                        blast_inv_scache_range(addr, addr + size);
                }
                preempt_enable();
                __sync();
                return;
        }

        if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_inv_dcache_range(addr, addr + size);
        }
        preempt_enable();

        bc_inv(addr, size);
        __sync();
}
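
/*
 * The invalidate-only flavour serves DMA_FROM_DEVICE: lines covering
 * the buffer are simply discarded (blast_inv_* uses Hit_Invalidate
 * rather than Hit_Writeback_Inv) so the CPU will reread what the
 * device has just written to memory.
 */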
#endif /* CONFIG_DMA_NONCOHERENT */

static void r4k_flush_icache_all(void)
{
        if (cpu_has_vtag_icache)
                r4k_blast_icache();
}

struct flush_kernel_vmap_range_args {
        unsigned long   vaddr;
        int             size;
};

static inline void local_r4k_flush_kernel_vmap_range_index(void *args)
{
        /*
         * Aliases only affect the primary caches so don't bother with
         * S-caches or T-caches.
         */
        r4k_blast_dcache();
}

static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
        struct flush_kernel_vmap_range_args *vmra = args;
        unsigned long vaddr = vmra->vaddr;
        int size = vmra->size;

        /*
         * Aliases only affect the primary caches so don't bother with
         * S-caches or T-caches.
         */
        R4600_HIT_CACHEOP_WAR_IMPL;
        blast_dcache_range(vaddr, vaddr + size);
}

static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
        struct flush_kernel_vmap_range_args args;

        args.vaddr = (unsigned long) vaddr;
        args.size = size;

        if (size >= dcache_size)
                r4k_on_each_cpu(R4K_INDEX,
                                local_r4k_flush_kernel_vmap_range_index, NULL);
        else
                r4k_on_each_cpu(R4K_HIT, local_r4k_flush_kernel_vmap_range,
                                &args);
}

static inline void rm7k_erratum31(void)
{
        const unsigned long ic_lsize = 32;
        unsigned long addr;

        /* RM7000 erratum #31. The icache is screwed at startup. */
        write_c0_taglo(0);
        write_c0_taghi(0);

        for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noreorder\n\t"
                        ".set mips3\n\t"
                        "cache\t%1, 0(%0)\n\t"
                        "cache\t%1, 0x1000(%0)\n\t"
                        "cache\t%1, 0x2000(%0)\n\t"
                        "cache\t%1, 0x3000(%0)\n\t"
                        "cache\t%2, 0(%0)\n\t"
                        "cache\t%2, 0x1000(%0)\n\t"
                        "cache\t%2, 0x2000(%0)\n\t"
                        "cache\t%2, 0x3000(%0)\n\t"
                        "cache\t%1, 0(%0)\n\t"
                        "cache\t%1, 0x1000(%0)\n\t"
                        "cache\t%1, 0x2000(%0)\n\t"
                        "cache\t%1, 0x3000(%0)\n\t"
                        ".set pop\n"
                        :
                        : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
        }
}
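
/*
 * The sequence above initializes the icache line by line: with TagLo/
 * TagHi zeroed, Index_Store_Tag_I writes an invalid tag, the Fill op
 * then fetches the line, and a final Index_Store_Tag_I invalidates it
 * again.  The 4kB loop with ops at offsets 0x0000/0x1000/0x2000/0x3000
 * covers all four 4kB ways of the RM7000's 16kB primary icache.
 */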

static inline int alias_74k_erratum(struct cpuinfo_mips *c)
{
        unsigned int imp = c->processor_id & PRID_IMP_MASK;
        unsigned int rev = c->processor_id & PRID_REV_MASK;
        int present = 0;

        /*
         * Early versions of the 74K do not update the cache tags on a
         * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
         * aliases.  In this case it is better to treat the cache as always
         * having aliases.  Also disable the synonym tag update feature
         * where available.  In this case no opportunistic tag update will
         * happen where a load causes a virtual address miss but a physical
         * address hit during a D-cache look-up.
         */
        switch (imp) {
        case PRID_IMP_74K:
                if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
                        present = 1;
                if (rev == PRID_REV_ENCODE_332(2, 4, 0))
                        write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
                break;
        case PRID_IMP_1074K:
                if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
                        present = 1;
                        write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
                }
                break;
        default:
                BUG();
        }

        return present;
}

static void b5k_instruction_hazard(void)
{
        __sync();
        __sync();
        __asm__ __volatile__(
        "       nop; nop; nop; nop; nop; nop; nop; nop\n"
        "       nop; nop; nop; nop; nop; nop; nop; nop\n"
        "       nop; nop; nop; nop; nop; nop; nop; nop\n"
        "       nop; nop; nop; nop; nop; nop; nop; nop\n"
        : : : "memory");
}

static char *way_string[] = { NULL, "direct mapped", "2-way",
        "3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
        "9-way", "10-way", "11-way", "12-way",
        "13-way", "14-way", "15-way", "16-way",
};

static void probe_pcache(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
        unsigned int prid = read_c0_prid();
        int has_74k_erratum = 0;
        unsigned long config1;
        unsigned int lsize;

        switch (current_cpu_type()) {
        case CPU_R4600:                 /* QED style two way caches? */
        case CPU_R4700:
        case CPU_R5000:
        case CPU_NEVADA:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = __ffs(icache_size/2);

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit= __ffs(dcache_size/2);

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;
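
        /*
         * A worked example of the classic R4k-style decode above: CONF_IC
         * occupies bits 11:9, so IC = 2 yields icache_size =
         * 1 << (12 + 2) = 16kB, and a set CONF_IB bit yields a
         * 16 << 1 = 32 byte line.  The cases below repeat the same
         * pattern with per-CPU way counts.
         */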

        case CPU_R5432:
        case CPU_R5500:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit= 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
                break;

        case CPU_TX49XX:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 4;
                c->icache.waybit= 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 4;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
        case CPU_R4300:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
                c->icache.waybit = 0;   /* doesn't matter */

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 1;
                c->dcache.waybit = 0;   /* does not matter */

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
        case CPU_R16000:
                icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
                c->icache.linesz = 64;
                c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
                c->dcache.linesz = 32;
                c->dcache.ways = 2;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_VR4133:
                write_c0_config(config & ~VR41_CONF_P4K);
                /* fall through */
        case CPU_VR4131:
                /* Workaround for cache instruction bug of VR4131 */
                if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
                    c->processor_id == 0x0c82U) {
                        config |= 0x00400000U;
                        if (c->processor_id == 0x0c80U)
                                config |= VR41_CONF_BP;
                        write_c0_config(config);
                } else
                        c->options |= MIPS_CPU_CACHE_CDEX_P;

                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = __ffs(icache_size/2);

                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = __ffs(dcache_size/2);
                break;

        case CPU_VR41XX:
        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4181:
        case CPU_VR4181A:
                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
                c->icache.waybit = 0;   /* doesn't matter */

                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 1;
                c->dcache.waybit = 0;   /* does not matter */

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_RM7000:
                rm7k_erratum31();

                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 4;
                c->icache.waybit = __ffs(icache_size / c->icache.ways);

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 4;
                c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_LOONGSON2:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                if (prid & 0x3)
                        c->icache.ways = 4;
                else
                        c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                if (prid & 0x3)
                        c->dcache.ways = 4;
                else
                        c->dcache.ways = 2;
                c->dcache.waybit = 0;
                break;

        case CPU_LOONGSON3:
                config1 = read_c0_config1();
                lsize = (config1 >> 19) & 7;
                if (lsize)
                        c->icache.linesz = 2 << lsize;
                else
                        c->icache.linesz = 0;
                c->icache.sets = 64 << ((config1 >> 22) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);
                icache_size = c->icache.sets *
                                          c->icache.ways *
                                          c->icache.linesz;
                c->icache.waybit = 0;

                lsize = (config1 >> 10) & 7;
                if (lsize)
                        c->dcache.linesz = 2 << lsize;
                else
                        c->dcache.linesz = 0;
                c->dcache.sets = 64 << ((config1 >> 13) & 7);
                c->dcache.ways = 1 + ((config1 >> 7) & 7);
                dcache_size = c->dcache.sets *
                                          c->dcache.ways *
                                          c->dcache.linesz;
                c->dcache.waybit = 0;
                if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0)
                        c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_CAVIUM_OCTEON3:
                /* For now lie about the number of ways. */
                c->icache.linesz = 128;
                c->icache.sets = 16;
                c->icache.ways = 8;
                c->icache.flags |= MIPS_CACHE_VTAG;
                icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

                c->dcache.linesz = 128;
                c->dcache.ways = 8;
                c->dcache.sets = 8;
                dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        default:
                if (!(config & MIPS_CONF_M))
                        panic("Don't know how to probe P-caches on this cpu.");

                /*
                 * We seem to be a MIPS32 or MIPS64 CPU, so let's probe
                 * the I-cache ...
                 */
                config1 = read_c0_config1();

                lsize = (config1 >> 19) & 7;

                /* IL == 7 is reserved */
                if (lsize == 7)
                        panic("Invalid icache line size");

                c->icache.linesz = lsize ? 2 << lsize : 0;

                c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);

                icache_size = c->icache.sets *
                              c->icache.ways *
                              c->icache.linesz;
                c->icache.waybit = __ffs(icache_size/c->icache.ways);

                if (config & MIPS_CONF_VI)
                        c->icache.flags |= MIPS_CACHE_VTAG;

                /*
                 * Now probe the MIPS32 / MIPS64 data cache.
                 */
                c->dcache.flags = 0;

                lsize = (config1 >> 10) & 7;

                /* DL == 7 is reserved */
                if (lsize == 7)
                        panic("Invalid dcache line size");

                c->dcache.linesz = lsize ? 2 << lsize : 0;

                c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
                c->dcache.ways = 1 + ((config1 >> 7) & 7);

                dcache_size = c->dcache.sets *
                              c->dcache.ways *
                              c->dcache.linesz;
                c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

                c->options |= MIPS_CPU_PREFETCH;
                break;
        }
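
        /*
         * Worked example for the generic MIPS32/64 decode in the default
         * case: an IL field of 4 gives a line size of 2 << 4 = 32 bytes,
         * and the sets formula 32 << ((IS + 1) & 7) maps IS = 0..6 to
         * 64..4096 sets while the out-of-sequence IS = 7 wraps to
         * 32 << 0 = 32 sets, matching the architectural encoding.
         */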

        /*
         * Processor configuration sanity check for the R4000SC erratum
         * #5.  With page sizes larger than 32kB there is no possibility of
         * getting a VCE exception anymore, so we don't care about this
         * misconfiguration.  The case is rather theoretical anyway;
         * presumably no vendor is shipping its hardware in the "bad"
         * configuration.
         */
        if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
            (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
            !(config & CONF_SC) && c->icache.linesz != 16 &&
            PAGE_SIZE <= 0x8000)
                panic("Improper R4000SC processor configuration detected");

        /* compute a couple of other cache variables */
        c->icache.waysize = icache_size / c->icache.ways;
        c->dcache.waysize = dcache_size / c->dcache.ways;

        c->icache.sets = c->icache.linesz ?
                icache_size / (c->icache.linesz * c->icache.ways) : 0;
        c->dcache.sets = c->dcache.linesz ?
                dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

        /*
         * R1x000 P-caches are odd in a positive way.  They're 32kB 2-way
         * virtually indexed, so they would normally suffer from aliases, but
         * magic in the hardware deals with that for us so we don't need to
         * take care ourselves.
         */
        switch (current_cpu_type()) {
        case CPU_20KC:
        case CPU_25KF:
        case CPU_I6400:
        case CPU_I6500:
        case CPU_SB1:
        case CPU_SB1A:
        case CPU_XLR:
                c->dcache.flags |= MIPS_CACHE_PINDEX;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
        case CPU_R16000:
                break;

        case CPU_74K:
        case CPU_1074K:
                has_74k_erratum = alias_74k_erratum(c);
                /* Fall through. */
        case CPU_M14KC:
        case CPU_M14KEC:
        case CPU_24K:
        case CPU_34K:
        case CPU_1004K:
        case CPU_INTERAPTIV:
        case CPU_P5600:
        case CPU_PROAPTIV:
        case CPU_M5150:
        case CPU_QEMU_GENERIC:
        case CPU_P6600:
        case CPU_M6250:
                if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
                    (c->icache.waysize > PAGE_SIZE))
                        c->icache.flags |= MIPS_CACHE_ALIASES;
                if (!has_74k_erratum && (read_c0_config7() & MIPS_CONF7_AR)) {
                        /*
                         * Effectively physically indexed dcache,
                         * thus no virtual aliases.
                         */
                        c->dcache.flags |= MIPS_CACHE_PINDEX;
                        break;
                }
                /* fall through */
        default:
                if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
                        c->dcache.flags |= MIPS_CACHE_ALIASES;
        }

        /* Physically indexed caches don't suffer from virtual aliasing */
        if (c->dcache.flags & MIPS_CACHE_PINDEX)
                c->dcache.flags &= ~MIPS_CACHE_ALIASES;

        /*
         * In systems with CM the icache fills from L2 or closer caches, and
         * thus sees remote stores without needing to write them back any
         * further than that.
         */
        if (mips_cm_present())
                c->icache.flags |= MIPS_IC_SNOOPS_REMOTE;

        switch (current_cpu_type()) {
        case CPU_20KC:
                /*
                 * Some older 20Kc chips don't have the 'VI' bit in
                 * the config register.
                 */
                c->icache.flags |= MIPS_CACHE_VTAG;
                break;

        case CPU_ALCHEMY:
        case CPU_I6400:
        case CPU_I6500:
                c->icache.flags |= MIPS_CACHE_IC_F_DC;
                break;

        case CPU_BMIPS5000:
                c->icache.flags |= MIPS_CACHE_IC_F_DC;
                /* Cache aliases are handled in hardware; allow HIGHMEM */
                c->dcache.flags &= ~MIPS_CACHE_ALIASES;
                break;

        case CPU_LOONGSON2:
                /*
                 * LOONGSON2 has a 4-way icache, but when using an indexed
                 * cache op, one op acts on all 4 ways.
                 */
1462                c->icache.ways = 1;
1463        }
1464
	pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
		icache_size >> 10,
		c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
		way_string[c->icache.ways], c->icache.linesz);

	pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes.\n",
		dcache_size >> 10, way_string[c->dcache.ways],
		(c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
		(c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
		c->dcache.linesz);
}

static void probe_vcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config2, lsize;

	if (current_cpu_type() != CPU_LOONGSON3)
		return;

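	/*
	 * Decode the victim cache geometry from the Loongson-3 CP0 Config2
	 * register: bits [23:20] encode the line size as 2^(n+1) bytes
	 * (0 means no vcache), bits [27:24] the number of sets as 64 << n,
	 * and bits [19:16] the associativity as n + 1.
	 */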
	config2 = read_c0_config2();
	lsize = (config2 >> 20) & 15;
	if (lsize)
		c->vcache.linesz = 2 << lsize;
	else
		c->vcache.linesz = 0;

	c->vcache.sets = 64 << ((config2 >> 24) & 15);
	c->vcache.ways = 1 + ((config2 >> 16) & 15);

	vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;

	c->vcache.waybit = 0;
	c->vcache.waysize = vcache_size / c->vcache.ways;

	pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
		vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = begin + pow2) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

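	/*
	 * Index-based cache ops address the cache modulo its size, so the
	 * first power-of-2 offset whose tag load returns the zero tag we
	 * just stored at 'begin' has wrapped around to index 0; that
	 * offset is the S-cache size.
	 */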
	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;		/* does not matter */

	return 1;
}

static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512 * 1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / c->scache.ways;
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

static void __init loongson3_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config2, lsize;

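	/*
	 * The same Config2-style decode as the victim cache above: bits
	 * [7:4] give the line size as 2^(n+1) bytes, bits [11:8] the sets
	 * per way as 64 << n, and bits [3:0] the associativity as n + 1.
	 */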
	config2 = read_c0_config2();
	lsize = (config2 >> 4) & 15;
	if (lsize)
		c->scache.linesz = 2 << lsize;
	else
		c->scache.linesz = 0;
	c->scache.sets = 64 << ((config2 >> 8) & 15);
	c->scache.ways = 1 + (config2 & 15);

	scache_size = c->scache.sets * c->scache.ways * c->scache.linesz;
	/*
	 * Loongson-3 has 4 cores, each with a 1MB scache; the per-core
	 * scaches are shared, so report the combined size.
	 */
	scache_size *= 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / c->scache.ways;
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
	if (scache_size)
		c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have an S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

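	/*
	 * The R10000 family reports its S-cache geometry in the config
	 * register: the SS field below selects the size as 512kB << SS,
	 * and bit 13 selects a 64 or 128 byte line.
	 */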
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;

	case CPU_LOONGSON3:
		loongson3_sc_init();
		return;

	case CPU_CAVIUM_OCTEON3:
	case CPU_XLP:
		/* don't need to worry about L2, fully coherent */
		return;

	default:
		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
				    MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
				    MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets *
					      c->scache.linesz;
				pr_info("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
					scache_size >> 10,
					way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write-only (and read as 0) on the
	 * early revisions of Alchemy SoCs.  It disables bus transaction
	 * overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata are actually silent about this bit, so we set
	 * it just in case for those revisions that the (now gone) cpu
	 * table said require it.
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

/* CP0 hazard avoidance. */
#define NXP_BARRIER()							\
	__asm__ __volatile__(						\
	".set noreorder\n\t"						\
	"nop; nop; nop; nop; nop; nop;\n\t"				\
	".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/*
	 * Clear all three cache coherency fields, i.e. K0 (bits 2:0),
	 * KU (bits 27:25) and K23 (bits 30:28), then program each of
	 * them with the default cacheability attribute.
	 */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}

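/*
 * Cache Coherency Algorithm override.  A value of 0-7 passed via the
 * "cca=" kernel parameter overrides the CCA already programmed into
 * c0_config; -1 (the default) keeps the hardware/bootloader setting.
 */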
static int cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 0;
}

early_param("cca", cca_setup);

static void coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit and some wire it to zero; others, like Toshiba, had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SoCs with
	 * the write-only c0_config.od bit and set it back to one on:
	 * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
	 */
	case CPU_ALCHEMY:
		au1x00_fixup_config_od();
		break;

	case PRID_IMP_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}

static void r4k_cache_error_setup(void)
{
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;

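	/*
	 * Install the cache error handler at the uncached exception
	 * vector (offset 0x100); the SB1/SB1A parts use their own
	 * handler variant.
	 */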
	switch (current_cpu_type()) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}
}

void r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	struct cpuinfo_mips *c = &current_cpu_data;

	probe_pcache();
	probe_vcache();
	setup_scache();

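	/*
	 * Now that the cache geometry is known, generate the specialised
	 * blast routines used by the flush handlers below.
	 */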
	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();
	r4k_blast_scache_node_setup();
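	/*
	 * EVA kernels reach user mappings through separate user-mode cache
	 * ops, so dedicated user-page variants are generated as well.
	 */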
#ifdef CONFIG_EVA
	r4k_blast_dcache_user_page_setup();
	r4k_blast_icache_user_page_setup();
#endif

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
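	/*
	 * With D-cache aliases, shared mappings must be aligned to a full
	 * D-cache way (sets * linesz bytes) so that every mapping of a
	 * page lands on the same cache index.
	 */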
	if (c->dcache.linesz && cpu_has_dc_aliases)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;

	__flush_cache_vmap	= r4k__flush_cache_vmap;
	__flush_cache_vunmap	= r4k__flush_cache_vunmap;

	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;

	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;
	local_flush_icache_range	= local_r4k_flush_icache_range;
	__flush_icache_user_range	= r4k_flush_icache_user_range;
	__local_flush_icache_user_range	= local_r4k_flush_icache_user_range;

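	/*
	 * On coherent-I/O systems the DMA cache maintenance hooks can be
	 * no-ops; otherwise wire up the r4k writeback/invalidate helpers.
	 */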
#ifdef CONFIG_DMA_NONCOHERENT
#ifdef CONFIG_DMA_MAYBE_COHERENT
	if (coherentio == IO_COHERENCE_ENABLED ||
	    (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio)) {
		_dma_cache_wback_inv	= (void *)cache_noop;
		_dma_cache_wback	= (void *)cache_noop;
		_dma_cache_inv		= (void *)cache_noop;
	} else
#endif /* CONFIG_DMA_MAYBE_COHERENT */
	{
		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
		_dma_cache_wback	= r4k_dma_cache_wback_inv;
		_dma_cache_inv		= r4k_dma_cache_inv;
	}
#endif /* CONFIG_DMA_NONCOHERENT */

	build_clear_page();
	build_copy_page();

	/*
	 * We want to run CMP kernels on cores with and without coherent
	 * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
	 * or not to flush caches.
	 */
	local_r4k___flush_cache_all(NULL);

	coherency_setup();
	board_cache_error_setup = r4k_cache_error_setup;

	/*
	 * Per-CPU overrides
	 */
	switch (current_cpu_type()) {
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
		/* No IPI is needed because all CPUs share the same D$ */
		flush_data_cache_page = r4k_blast_dcache_page;
		break;
	case CPU_BMIPS5000:
		/* We lose our superpowers if L2 is disabled */
		if (c->scache.flags & MIPS_CACHE_NOT_PRESENT)
			break;

		/* I$ fills from D$ just by emptying the write buffers */
		flush_cache_page = (void *)b5k_instruction_hazard;
		flush_cache_range = (void *)b5k_instruction_hazard;
		local_flush_data_cache_page = (void *)b5k_instruction_hazard;
		flush_data_cache_page = (void *)b5k_instruction_hazard;
		flush_icache_range = (void *)b5k_instruction_hazard;
		local_flush_icache_range = (void *)b5k_instruction_hazard;

		/* Optimization: an L2 flush implicitly flushes the L1 */
		current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
		break;
	case CPU_LOONGSON3:
		/* Loongson-3 maintains cache coherency by hardware */
		__flush_cache_all	= cache_noop;
		__flush_cache_vmap	= cache_noop;
		__flush_cache_vunmap	= cache_noop;
		__flush_kernel_vmap_range = (void *)cache_noop;
		flush_cache_mm		= (void *)cache_noop;
		flush_cache_page	= (void *)cache_noop;
		flush_cache_range	= (void *)cache_noop;
		flush_icache_all	= (void *)cache_noop;
		flush_data_cache_page	= (void *)cache_noop;
		local_flush_data_cache_page	= (void *)cache_noop;
		break;
	}
}

static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
				 void *v)
{
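	/*
	 * Re-run coherency_setup() whenever a CPU re-enters the kernel
	 * after a (possibly failed) low-power transition, since c0_config
	 * state such as the CCA may have been lost.
	 */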
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		coherency_setup();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_cache_pm_notifier_block = {
	.notifier_call = r4k_cache_pm_notifier,
};

int __init r4k_cache_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
}
arch_initcall(r4k_cache_init_pm);