linux/arch/mips/mm/tlbex.c
<<
>>
Prefs
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Synthesize TLB refill handlers at runtime.
   7 *
   8 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
   9 * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
  10 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
  11 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
  12 * Copyright (C) 2011  MIPS Technologies, Inc.
  13 *
  14 * ... and the days got worse and worse and now you see
  15 * I've gone completly out of my mind.
  16 *
  17 * They're coming to take me a away haha
  18 * they're coming to take me a away hoho hihi haha
  19 * to the funny farm where code is beautiful all the time ...
  20 *
  21 * (Condolences to Napoleon XIV)
  22 */
  23
  24#include <linux/bug.h>
  25#include <linux/kernel.h>
  26#include <linux/types.h>
  27#include <linux/smp.h>
  28#include <linux/string.h>
  29#include <linux/cache.h>
  30
  31#include <asm/cacheflush.h>
  32#include <asm/cpu-type.h>
  33#include <asm/pgtable.h>
  34#include <asm/war.h>
  35#include <asm/uasm.h>
  36#include <asm/setup.h>
  37
  38/*
  39 * TLB load/store/modify handlers.
  40 *
  41 * Only the fastpath gets synthesized at runtime, the slowpath for
  42 * do_page_fault remains normal asm.
  43 */
  44extern void tlb_do_page_fault_0(void);
  45extern void tlb_do_page_fault_1(void);
  46
  47struct work_registers {
  48        int r1;
  49        int r2;
  50        int r3;
  51};
  52
  53struct tlb_reg_save {
  54        unsigned long a;
  55        unsigned long b;
  56} ____cacheline_aligned_in_smp;
  57
  58static struct tlb_reg_save handler_reg_save[NR_CPUS];
  59
  60static inline int r45k_bvahwbug(void)
  61{
  62        /* XXX: We should probe for the presence of this bug, but we don't. */
  63        return 0;
  64}
  65
  66static inline int r4k_250MHZhwbug(void)
  67{
  68        /* XXX: We should probe for the presence of this bug, but we don't. */
  69        return 0;
  70}
  71
  72static inline int __maybe_unused bcm1250_m3_war(void)
  73{
  74        return BCM1250_M3_WAR;
  75}
  76
  77static inline int __maybe_unused r10000_llsc_war(void)
  78{
  79        return R10000_LLSC_WAR;
  80}
  81
  82static int use_bbit_insns(void)
  83{
  84        switch (current_cpu_type()) {
  85        case CPU_CAVIUM_OCTEON:
  86        case CPU_CAVIUM_OCTEON_PLUS:
  87        case CPU_CAVIUM_OCTEON2:
  88        case CPU_CAVIUM_OCTEON3:
  89                return 1;
  90        default:
  91                return 0;
  92        }
  93}
  94
  95static int use_lwx_insns(void)
  96{
  97        switch (current_cpu_type()) {
  98        case CPU_CAVIUM_OCTEON2:
  99        case CPU_CAVIUM_OCTEON3:
 100                return 1;
 101        default:
 102                return 0;
 103        }
 104}
 105#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
 106    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
 107static bool scratchpad_available(void)
 108{
 109        return true;
 110}
 111static int scratchpad_offset(int i)
 112{
 113        /*
 114         * CVMSEG starts at address -32768 and extends for
 115         * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
 116         */
 117        i += 1; /* Kernel use starts at the top and works down. */
 118        return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
 119}
 120#else
 121static bool scratchpad_available(void)
 122{
 123        return false;
 124}
 125static int scratchpad_offset(int i)
 126{
 127        BUG();
 128        /* Really unreachable, but evidently some GCC want this. */
 129        return 0;
 130}
 131#endif
 132/*
 133 * Found by experiment: At least some revisions of the 4kc throw under
 134 * some circumstances a machine check exception, triggered by invalid
 135 * values in the index register.  Delaying the tlbp instruction until
 136 * after the next branch,  plus adding an additional nop in front of
 137 * tlbwi/tlbwr avoids the invalid index register values. Nobody knows
 138 * why; it's not an issue caused by the core RTL.
 139 *
 140 */
 141static int m4kc_tlbp_war(void)
 142{
 143        return (current_cpu_data.processor_id & 0xffff00) ==
 144               (PRID_COMP_MIPS | PRID_IMP_4KC);
 145}
 146
 147/* Handle labels (which must be positive integers). */
 148enum label_id {
 149        label_second_part = 1,
 150        label_leave,
 151        label_vmalloc,
 152        label_vmalloc_done,
 153        label_tlbw_hazard_0,
 154        label_split = label_tlbw_hazard_0 + 8,
 155        label_tlbl_goaround1,
 156        label_tlbl_goaround2,
 157        label_nopage_tlbl,
 158        label_nopage_tlbs,
 159        label_nopage_tlbm,
 160        label_smp_pgtable_change,
 161        label_r3000_write_probe_fail,
 162        label_large_segbits_fault,
 163#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 164        label_tlb_huge_update,
 165#endif
 166};
 167
 168UASM_L_LA(_second_part)
 169UASM_L_LA(_leave)
 170UASM_L_LA(_vmalloc)
 171UASM_L_LA(_vmalloc_done)
 172/* _tlbw_hazard_x is handled differently.  */
 173UASM_L_LA(_split)
 174UASM_L_LA(_tlbl_goaround1)
 175UASM_L_LA(_tlbl_goaround2)
 176UASM_L_LA(_nopage_tlbl)
 177UASM_L_LA(_nopage_tlbs)
 178UASM_L_LA(_nopage_tlbm)
 179UASM_L_LA(_smp_pgtable_change)
 180UASM_L_LA(_r3000_write_probe_fail)
 181UASM_L_LA(_large_segbits_fault)
 182#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 183UASM_L_LA(_tlb_huge_update)
 184#endif
 185
 186static int hazard_instance;
 187
 188static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
 189{
 190        switch (instance) {
 191        case 0 ... 7:
 192                uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
 193                return;
 194        default:
 195                BUG();
 196        }
 197}
 198
 199static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
 200{
 201        switch (instance) {
 202        case 0 ... 7:
 203                uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
 204                break;
 205        default:
 206                BUG();
 207        }
 208}
 209
 210/*
 211 * pgtable bits are assigned dynamically depending on processor feature
 212 * and statically based on kernel configuration.  This spits out the actual
 213 * values the kernel is using.  Required to make sense from disassembled
 214 * TLB exception handlers.
 215 */
 216static void output_pgtable_bits_defines(void)
 217{
 218#define pr_define(fmt, ...)                                     \
 219        pr_debug("#define " fmt, ##__VA_ARGS__)
 220
 221        pr_debug("#include <asm/asm.h>\n");
 222        pr_debug("#include <asm/regdef.h>\n");
 223        pr_debug("\n");
 224
 225        pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
 226        pr_define("_PAGE_READ_SHIFT %d\n", _PAGE_READ_SHIFT);
 227        pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
 228        pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
 229        pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
 230#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 231        pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
 232        pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
 233#endif
 234        if (cpu_has_rixi) {
 235#ifdef _PAGE_NO_EXEC_SHIFT
 236                pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
 237#endif
 238#ifdef _PAGE_NO_READ_SHIFT
 239                pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
 240#endif
 241        }
 242        pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
 243        pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
 244        pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
 245        pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
 246        pr_debug("\n");
 247}
 248
 249static inline void dump_handler(const char *symbol, const u32 *handler, int count)
 250{
 251        int i;
 252
 253        pr_debug("LEAF(%s)\n", symbol);
 254
 255        pr_debug("\t.set push\n");
 256        pr_debug("\t.set noreorder\n");
 257
 258        for (i = 0; i < count; i++)
 259                pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]);
 260
 261        pr_debug("\t.set\tpop\n");
 262
 263        pr_debug("\tEND(%s)\n", symbol);
 264}
 265
 266/* The only general purpose registers allowed in TLB handlers. */
 267#define K0              26
 268#define K1              27
 269
 270/* Some CP0 registers */
 271#define C0_INDEX        0, 0
 272#define C0_ENTRYLO0     2, 0
 273#define C0_TCBIND       2, 2
 274#define C0_ENTRYLO1     3, 0
 275#define C0_CONTEXT      4, 0
 276#define C0_PAGEMASK     5, 0
 277#define C0_BADVADDR     8, 0
 278#define C0_ENTRYHI      10, 0
 279#define C0_EPC          14, 0
 280#define C0_XCONTEXT     20, 0
 281
 282#ifdef CONFIG_64BIT
 283# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
 284#else
 285# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
 286#endif
 287
 288/* The worst case length of the handler is around 18 instructions for
 289 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 290 * Maximum space available is 32 instructions for R3000 and 64
 291 * instructions for R4000.
 292 *
 293 * We deliberately chose a buffer size of 128, so we won't scribble
 294 * over anything important on overflow before we panic.
 295 */
 296static u32 tlb_handler[128];
 297
 298/* simply assume worst case size for labels and relocs */
 299static struct uasm_label labels[128];
 300static struct uasm_reloc relocs[128];
 301
 302static int check_for_high_segbits;
 303
 304static unsigned int kscratch_used_mask;
 305
 306static inline int __maybe_unused c0_kscratch(void)
 307{
 308        switch (current_cpu_type()) {
 309        case CPU_XLP:
 310        case CPU_XLR:
 311                return 22;
 312        default:
 313                return 31;
 314        }
 315}
 316
 317static int allocate_kscratch(void)
 318{
 319        int r;
 320        unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
 321
 322        r = ffs(a);
 323
 324        if (r == 0)
 325                return -1;
 326
 327        r--; /* make it zero based */
 328
 329        kscratch_used_mask |= (1 << r);
 330
 331        return r;
 332}
 333
 334static int scratch_reg;
 335static int pgd_reg;
 336enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
 337
 338static struct work_registers build_get_work_registers(u32 **p)
 339{
 340        struct work_registers r;
 341
 342        if (scratch_reg >= 0) {
 343                /* Save in CPU local C0_KScratch? */
 344                UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
 345                r.r1 = K0;
 346                r.r2 = K1;
 347                r.r3 = 1;
 348                return r;
 349        }
 350
 351        if (num_possible_cpus() > 1) {
 352                /* Get smp_processor_id */
 353                UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG);
 354                UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);
 355
 356                /* handler_reg_save index in K0 */
 357                UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
 358
 359                UASM_i_LA(p, K1, (long)&handler_reg_save);
 360                UASM_i_ADDU(p, K0, K0, K1);
 361        } else {
 362                UASM_i_LA(p, K0, (long)&handler_reg_save);
 363        }
 364        /* K0 now points to save area, save $1 and $2  */
 365        UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
 366        UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
 367
 368        r.r1 = K1;
 369        r.r2 = 1;
 370        r.r3 = 2;
 371        return r;
 372}
 373
 374static void build_restore_work_registers(u32 **p)
 375{
 376        if (scratch_reg >= 0) {
 377                UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
 378                return;
 379        }
 380        /* K0 already points to save area, restore $1 and $2  */
 381        UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
 382        UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
 383}
 384
 385#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 386
 387/*
 388 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
 389 * we cannot do r3000 under these circumstances.
 390 *
 391 * Declare pgd_current here instead of including mmu_context.h to avoid type
 392 * conflicts for tlbmiss_handler_setup_pgd
 393 */
 394extern unsigned long pgd_current[];
 395
 396/*
 397 * The R3000 TLB handler is simple.
 398 */
 399static void build_r3000_tlb_refill_handler(void)
 400{
 401        long pgdc = (long)pgd_current;
 402        u32 *p;
 403
 404        memset(tlb_handler, 0, sizeof(tlb_handler));
 405        p = tlb_handler;
 406
 407        uasm_i_mfc0(&p, K0, C0_BADVADDR);
 408        uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
 409        uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
 410        uasm_i_srl(&p, K0, K0, 22); /* load delay */
 411        uasm_i_sll(&p, K0, K0, 2);
 412        uasm_i_addu(&p, K1, K1, K0);
 413        uasm_i_mfc0(&p, K0, C0_CONTEXT);
 414        uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
 415        uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
 416        uasm_i_addu(&p, K1, K1, K0);
 417        uasm_i_lw(&p, K0, 0, K1);
 418        uasm_i_nop(&p); /* load delay */
 419        uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
 420        uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
 421        uasm_i_tlbwr(&p); /* cp0 delay */
 422        uasm_i_jr(&p, K1);
 423        uasm_i_rfe(&p); /* branch delay */
 424
 425        if (p > tlb_handler + 32)
 426                panic("TLB refill handler space exceeded");
 427
 428        pr_debug("Wrote TLB refill handler (%u instructions).\n",
 429                 (unsigned int)(p - tlb_handler));
 430
 431        memcpy((void *)ebase, tlb_handler, 0x80);
 432
 433        dump_handler("r3000_tlb_refill", (u32 *)ebase, 32);
 434}
 435#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
 436
 437/*
 438 * The R4000 TLB handler is much more complicated. We have two
 439 * consecutive handler areas with 32 instructions space each.
 440 * Since they aren't used at the same time, we can overflow in the
 441 * other one.To keep things simple, we first assume linear space,
 442 * then we relocate it to the final handler layout as needed.
 443 */
 444static u32 final_handler[64];
 445
 446/*
 447 * Hazards
 448 *
 449 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 450 * 2. A timing hazard exists for the TLBP instruction.
 451 *
 452 *      stalling_instruction
 453 *      TLBP
 454 *
 455 * The JTLB is being read for the TLBP throughout the stall generated by the
 456 * previous instruction. This is not really correct as the stalling instruction
 457 * can modify the address used to access the JTLB.  The failure symptom is that
 458 * the TLBP instruction will use an address created for the stalling instruction
 459 * and not the address held in C0_ENHI and thus report the wrong results.
 460 *
 461 * The software work-around is to not allow the instruction preceding the TLBP
 462 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 463 *
 464 * Errata 2 will not be fixed.  This errata is also on the R5000.
 465 *
 466 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 467 */
 468static void __maybe_unused build_tlb_probe_entry(u32 **p)
 469{
 470        switch (current_cpu_type()) {
 471        /* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
 472        case CPU_R4600:
 473        case CPU_R4700:
 474        case CPU_R5000:
 475        case CPU_NEVADA:
 476                uasm_i_nop(p);
 477                uasm_i_tlbp(p);
 478                break;
 479
 480        default:
 481                uasm_i_tlbp(p);
 482                break;
 483        }
 484}
 485
 486/*
 487 * Write random or indexed TLB entry, and care about the hazards from
 488 * the preceding mtc0 and for the following eret.
 489 */
 490enum tlb_write_entry { tlb_random, tlb_indexed };
 491
 492static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 493                                  struct uasm_reloc **r,
 494                                  enum tlb_write_entry wmode)
 495{
 496        void(*tlbw)(u32 **) = NULL;
 497
 498        switch (wmode) {
 499        case tlb_random: tlbw = uasm_i_tlbwr; break;
 500        case tlb_indexed: tlbw = uasm_i_tlbwi; break;
 501        }
 502
 503        if (cpu_has_mips_r2) {
 504                /*
 505                 * The architecture spec says an ehb is required here,
 506                 * but a number of cores do not have the hazard and
 507                 * using an ehb causes an expensive pipeline stall.
 508                 */
 509                switch (current_cpu_type()) {
 510                case CPU_M14KC:
 511                case CPU_74K:
 512                case CPU_1074K:
 513                case CPU_PROAPTIV:
 514                case CPU_P5600:
 515                case CPU_M5150:
 516                        break;
 517
 518                default:
 519                        uasm_i_ehb(p);
 520                        break;
 521                }
 522                tlbw(p);
 523                return;
 524        }
 525
 526        switch (current_cpu_type()) {
 527        case CPU_R4000PC:
 528        case CPU_R4000SC:
 529        case CPU_R4000MC:
 530        case CPU_R4400PC:
 531        case CPU_R4400SC:
 532        case CPU_R4400MC:
 533                /*
 534                 * This branch uses up a mtc0 hazard nop slot and saves
 535                 * two nops after the tlbw instruction.
 536                 */
 537                uasm_bgezl_hazard(p, r, hazard_instance);
 538                tlbw(p);
 539                uasm_bgezl_label(l, p, hazard_instance);
 540                hazard_instance++;
 541                uasm_i_nop(p);
 542                break;
 543
 544        case CPU_R4600:
 545        case CPU_R4700:
 546                uasm_i_nop(p);
 547                tlbw(p);
 548                uasm_i_nop(p);
 549                break;
 550
 551        case CPU_R5000:
 552        case CPU_NEVADA:
 553                uasm_i_nop(p); /* QED specifies 2 nops hazard */
 554                uasm_i_nop(p); /* QED specifies 2 nops hazard */
 555                tlbw(p);
 556                break;
 557
 558        case CPU_R4300:
 559        case CPU_5KC:
 560        case CPU_TX49XX:
 561        case CPU_PR4450:
 562        case CPU_XLR:
 563                uasm_i_nop(p);
 564                tlbw(p);
 565                break;
 566
 567        case CPU_R10000:
 568        case CPU_R12000:
 569        case CPU_R14000:
 570        case CPU_4KC:
 571        case CPU_4KEC:
 572        case CPU_M14KC:
 573        case CPU_M14KEC:
 574        case CPU_SB1:
 575        case CPU_SB1A:
 576        case CPU_4KSC:
 577        case CPU_20KC:
 578        case CPU_25KF:
 579        case CPU_BMIPS32:
 580        case CPU_BMIPS3300:
 581        case CPU_BMIPS4350:
 582        case CPU_BMIPS4380:
 583        case CPU_BMIPS5000:
 584        case CPU_LOONGSON2:
 585        case CPU_LOONGSON3:
 586        case CPU_R5500:
 587                if (m4kc_tlbp_war())
 588                        uasm_i_nop(p);
 589        case CPU_ALCHEMY:
 590                tlbw(p);
 591                break;
 592
 593        case CPU_RM7000:
 594                uasm_i_nop(p);
 595                uasm_i_nop(p);
 596                uasm_i_nop(p);
 597                uasm_i_nop(p);
 598                tlbw(p);
 599                break;
 600
 601        case CPU_VR4111:
 602        case CPU_VR4121:
 603        case CPU_VR4122:
 604        case CPU_VR4181:
 605        case CPU_VR4181A:
 606                uasm_i_nop(p);
 607                uasm_i_nop(p);
 608                tlbw(p);
 609                uasm_i_nop(p);
 610                uasm_i_nop(p);
 611                break;
 612
 613        case CPU_VR4131:
 614        case CPU_VR4133:
 615        case CPU_R5432:
 616                uasm_i_nop(p);
 617                uasm_i_nop(p);
 618                tlbw(p);
 619                break;
 620
 621        case CPU_JZRISC:
 622                tlbw(p);
 623                uasm_i_nop(p);
 624                break;
 625
 626        default:
 627                panic("No TLB refill handler yet (CPU type: %d)",
 628                      current_cpu_type());
 629                break;
 630        }
 631}
 632
 633static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 634                                                        unsigned int reg)
 635{
 636        if (cpu_has_rixi) {
 637                UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
 638        } else {
 639#ifdef CONFIG_64BIT_PHYS_ADDR
 640                uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
 641#else
 642                UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
 643#endif
 644        }
 645}
 646
 647#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 648
 649static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
 650                                   unsigned int tmp, enum label_id lid,
 651                                   int restore_scratch)
 652{
 653        if (restore_scratch) {
 654                /* Reset default page size */
 655                if (PM_DEFAULT_MASK >> 16) {
 656                        uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
 657                        uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
 658                        uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 659                        uasm_il_b(p, r, lid);
 660                } else if (PM_DEFAULT_MASK) {
 661                        uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
 662                        uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 663                        uasm_il_b(p, r, lid);
 664                } else {
 665                        uasm_i_mtc0(p, 0, C0_PAGEMASK);
 666                        uasm_il_b(p, r, lid);
 667                }
 668                if (scratch_reg >= 0)
 669                        UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
 670                else
 671                        UASM_i_LW(p, 1, scratchpad_offset(0), 0);
 672        } else {
 673                /* Reset default page size */
 674                if (PM_DEFAULT_MASK >> 16) {
 675                        uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
 676                        uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
 677                        uasm_il_b(p, r, lid);
 678                        uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 679                } else if (PM_DEFAULT_MASK) {
 680                        uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
 681                        uasm_il_b(p, r, lid);
 682                        uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 683                } else {
 684                        uasm_il_b(p, r, lid);
 685                        uasm_i_mtc0(p, 0, C0_PAGEMASK);
 686                }
 687        }
 688}
 689
 690static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
 691                                       struct uasm_reloc **r,
 692                                       unsigned int tmp,
 693                                       enum tlb_write_entry wmode,
 694                                       int restore_scratch)
 695{
 696        /* Set huge page tlb entry size */
 697        uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
 698        uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
 699        uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 700
 701        build_tlb_write_entry(p, l, r, wmode);
 702
 703        build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
 704}
 705
 706/*
 707 * Check if Huge PTE is present, if so then jump to LABEL.
 708 */
 709static void
 710build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
 711                  unsigned int pmd, int lid)
 712{
 713        UASM_i_LW(p, tmp, 0, pmd);
 714        if (use_bbit_insns()) {
 715                uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
 716        } else {
 717                uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
 718                uasm_il_bnez(p, r, tmp, lid);
 719        }
 720}
 721
 722static void build_huge_update_entries(u32 **p, unsigned int pte,
 723                                      unsigned int tmp)
 724{
 725        int small_sequence;
 726
 727        /*
 728         * A huge PTE describes an area the size of the
 729         * configured huge page size. This is twice the
 730         * of the large TLB entry size we intend to use.
 731         * A TLB entry half the size of the configured
 732         * huge page size is configured into entrylo0
 733         * and entrylo1 to cover the contiguous huge PTE
 734         * address space.
 735         */
 736        small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
 737
 738        /* We can clobber tmp.  It isn't used after this.*/
 739        if (!small_sequence)
 740                uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 741
 742        build_convert_pte_to_entrylo(p, pte);
 743        UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
 744        /* convert to entrylo1 */
 745        if (small_sequence)
 746                UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
 747        else
 748                UASM_i_ADDU(p, pte, pte, tmp);
 749
 750        UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
 751}
 752
 753static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
 754                                    struct uasm_label **l,
 755                                    unsigned int pte,
 756                                    unsigned int ptr)
 757{
 758#ifdef CONFIG_SMP
 759        UASM_i_SC(p, pte, 0, ptr);
 760        uasm_il_beqz(p, r, pte, label_tlb_huge_update);
 761        UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
 762#else
 763        UASM_i_SW(p, pte, 0, ptr);
 764#endif
 765        build_huge_update_entries(p, pte, ptr);
 766        build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
 767}
 768#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
 769
 770#ifdef CONFIG_64BIT
 771/*
 772 * TMP and PTR are scratch.
 773 * TMP will be clobbered, PTR will hold the pmd entry.
 774 */
 775static void
 776build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 777                 unsigned int tmp, unsigned int ptr)
 778{
 779#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 780        long pgdc = (long)pgd_current;
 781#endif
 782        /*
 783         * The vmalloc handling is not in the hotpath.
 784         */
 785        uasm_i_dmfc0(p, tmp, C0_BADVADDR);
 786
 787        if (check_for_high_segbits) {
 788                /*
 789                 * The kernel currently implicitely assumes that the
 790                 * MIPS SEGBITS parameter for the processor is
 791                 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
 792                 * allocate virtual addresses outside the maximum
 793                 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
 794                 * that doesn't prevent user code from accessing the
 795                 * higher xuseg addresses.  Here, we make sure that
 796                 * everything but the lower xuseg addresses goes down
 797                 * the module_alloc/vmalloc path.
 798                 */
 799                uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
 800                uasm_il_bnez(p, r, ptr, label_vmalloc);
 801        } else {
 802                uasm_il_bltz(p, r, tmp, label_vmalloc);
 803        }
 804        /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
 805
 806        if (pgd_reg != -1) {
 807                /* pgd is in pgd_reg */
 808                UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
 809        } else {
 810#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
 811                /*
 812                 * &pgd << 11 stored in CONTEXT [23..63].
 813                 */
 814                UASM_i_MFC0(p, ptr, C0_CONTEXT);
 815
 816                /* Clear lower 23 bits of context. */
 817                uasm_i_dins(p, ptr, 0, 0, 23);
 818
 819                /* 1 0  1 0 1  << 6  xkphys cached */
 820                uasm_i_ori(p, ptr, ptr, 0x540);
 821                uasm_i_drotr(p, ptr, ptr, 11);
 822#elif defined(CONFIG_SMP)
 823                UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
 824                uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
 825                UASM_i_LA_mostly(p, tmp, pgdc);
 826                uasm_i_daddu(p, ptr, ptr, tmp);
 827                uasm_i_dmfc0(p, tmp, C0_BADVADDR);
 828                uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
 829#else
 830                UASM_i_LA_mostly(p, ptr, pgdc);
 831                uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
 832#endif
 833        }
 834
 835        uasm_l_vmalloc_done(l, *p);
 836
 837        /* get pgd offset in bytes */
 838        uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
 839
 840        uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
 841        uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
 842#ifndef __PAGETABLE_PMD_FOLDED
 843        uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 844        uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
 845        uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
 846        uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
 847        uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
 848#endif
 849}
 850
 851/*
 852 * BVADDR is the faulting address, PTR is scratch.
 853 * PTR will hold the pgd for vmalloc.
 854 */
 855static void
 856build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 857                        unsigned int bvaddr, unsigned int ptr,
 858                        enum vmalloc64_mode mode)
 859{
 860        long swpd = (long)swapper_pg_dir;
 861        int single_insn_swpd;
 862        int did_vmalloc_branch = 0;
 863
 864        single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
 865
 866        uasm_l_vmalloc(l, *p);
 867
 868        if (mode != not_refill && check_for_high_segbits) {
 869                if (single_insn_swpd) {
 870                        uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
 871                        uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
 872                        did_vmalloc_branch = 1;
 873                        /* fall through */
 874                } else {
 875                        uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
 876                }
 877        }
 878        if (!did_vmalloc_branch) {
 879                if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
 880                        uasm_il_b(p, r, label_vmalloc_done);
 881                        uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
 882                } else {
 883                        UASM_i_LA_mostly(p, ptr, swpd);
 884                        uasm_il_b(p, r, label_vmalloc_done);
 885                        if (uasm_in_compat_space_p(swpd))
 886                                uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
 887                        else
 888                                uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
 889                }
 890        }
 891        if (mode != not_refill && check_for_high_segbits) {
 892                uasm_l_large_segbits_fault(l, *p);
 893                /*
 894                 * We get here if we are an xsseg address, or if we are
 895                 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
 896                 *
 897                 * Ignoring xsseg (assume disabled so would generate
 898                 * (address errors?), the only remaining possibility
 899                 * is the upper xuseg addresses.  On processors with
 900                 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
 901                 * addresses would have taken an address error. We try
 902                 * to mimic that here by taking a load/istream page
 903                 * fault.
 904                 */
 905                UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
 906                uasm_i_jr(p, ptr);
 907
 908                if (mode == refill_scratch) {
 909                        if (scratch_reg >= 0)
 910                                UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
 911                        else
 912                                UASM_i_LW(p, 1, scratchpad_offset(0), 0);
 913                } else {
 914                        uasm_i_nop(p);
 915                }
 916        }
 917}
 918
 919#else /* !CONFIG_64BIT */
 920
 921/*
 922 * TMP and PTR are scratch.
 923 * TMP will be clobbered, PTR will hold the pgd entry.
 924 */
 925static void __maybe_unused
 926build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 927{
 928        if (pgd_reg != -1) {
 929                /* pgd is in pgd_reg */
 930                uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg);
 931                uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 932        } else {
 933                long pgdc = (long)pgd_current;
 934
 935                /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
 936#ifdef CONFIG_SMP
 937                uasm_i_mfc0(p, ptr, SMP_CPUID_REG);
 938                UASM_i_LA_mostly(p, tmp, pgdc);
 939                uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
 940                uasm_i_addu(p, ptr, tmp, ptr);
 941#else
 942                UASM_i_LA_mostly(p, ptr, pgdc);
 943#endif
 944                uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 945                uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
 946        }
 947        uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
 948        uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
 949        uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
 950}
 951
 952#endif /* !CONFIG_64BIT */
 953
 954static void build_adjust_context(u32 **p, unsigned int ctx)
 955{
 956        unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
 957        unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
 958
 959        switch (current_cpu_type()) {
 960        case CPU_VR41XX:
 961        case CPU_VR4111:
 962        case CPU_VR4121:
 963        case CPU_VR4122:
 964        case CPU_VR4131:
 965        case CPU_VR4181:
 966        case CPU_VR4181A:
 967        case CPU_VR4133:
 968                shift += 2;
 969                break;
 970
 971        default:
 972                break;
 973        }
 974
 975        if (shift)
 976                UASM_i_SRL(p, ctx, ctx, shift);
 977        uasm_i_andi(p, ctx, ctx, mask);
 978}
 979
 980static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 981{
 982        /*
 983         * Bug workaround for the Nevada. It seems as if under certain
 984         * circumstances the move from cp0_context might produce a
 985         * bogus result when the mfc0 instruction and its consumer are
 986         * in a different cacheline or a load instruction, probably any
 987         * memory reference, is between them.
 988         */
 989        switch (current_cpu_type()) {
 990        case CPU_NEVADA:
 991                UASM_i_LW(p, ptr, 0, ptr);
 992                GET_CONTEXT(p, tmp); /* get context reg */
 993                break;
 994
 995        default:
 996                GET_CONTEXT(p, tmp); /* get context reg */
 997                UASM_i_LW(p, ptr, 0, ptr);
 998                break;
 999        }
1000
1001        build_adjust_context(p, tmp);
1002        UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
1003}
1004
1005static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
1006{
1007        /*
1008         * 64bit address support (36bit on a 32bit CPU) in a 32bit
1009         * Kernel is a special case. Only a few CPUs use it.
1010         */
1011#ifdef CONFIG_64BIT_PHYS_ADDR
1012        if (cpu_has_64bits) {
1013                uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
1014                uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
1015                if (cpu_has_rixi) {
1016                        UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
1017                        UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1018                        UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
1019                } else {
1020                        uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
1021                        UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1022                        uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
1023                }
1024                UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
1025        } else {
1026                int pte_off_even = sizeof(pte_t) / 2;
1027                int pte_off_odd = pte_off_even + sizeof(pte_t);
1028
1029                /* The pte entries are pre-shifted */
1030                uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
1031                UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1032                uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
1033                UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
1034        }
1035#else
1036        UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
1037        UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
1038        if (r45k_bvahwbug())
1039                build_tlb_probe_entry(p);
1040        if (cpu_has_rixi) {
1041                UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
1042                if (r4k_250MHZhwbug())
1043                        UASM_i_MTC0(p, 0, C0_ENTRYLO0);
1044                UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1045                UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
1046        } else {
1047                UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
1048                if (r4k_250MHZhwbug())
1049                        UASM_i_MTC0(p, 0, C0_ENTRYLO0);
1050                UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1051                UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
1052                if (r45k_bvahwbug())
1053                        uasm_i_mfc0(p, tmp, C0_INDEX);
1054        }
1055        if (r4k_250MHZhwbug())
1056                UASM_i_MTC0(p, 0, C0_ENTRYLO1);
1057        UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
1058#endif
1059}
1060
1061struct mips_huge_tlb_info {
1062        int huge_pte;
1063        int restore_scratch;
1064};
1065
1066static struct mips_huge_tlb_info
1067build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
1068                               struct uasm_reloc **r, unsigned int tmp,
1069                               unsigned int ptr, int c0_scratch_reg)
1070{
1071        struct mips_huge_tlb_info rv;
1072        unsigned int even, odd;
1073        int vmalloc_branch_delay_filled = 0;
1074        const int scratch = 1; /* Our extra working register */
1075
1076        rv.huge_pte = scratch;
1077        rv.restore_scratch = 0;
1078
1079        if (check_for_high_segbits) {
1080                UASM_i_MFC0(p, tmp, C0_BADVADDR);
1081
1082                if (pgd_reg != -1)
1083                        UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
1084                else
1085                        UASM_i_MFC0(p, ptr, C0_CONTEXT);
1086
1087                if (c0_scratch_reg >= 0)
1088                        UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1089                else
1090                        UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1091
1092                uasm_i_dsrl_safe(p, scratch, tmp,
1093                                 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
1094                uasm_il_bnez(p, r, scratch, label_vmalloc);
1095
1096                if (pgd_reg == -1) {
1097                        vmalloc_branch_delay_filled = 1;
1098                        /* Clear lower 23 bits of context. */
1099                        uasm_i_dins(p, ptr, 0, 0, 23);
1100                }
1101        } else {
1102                if (pgd_reg != -1)
1103                        UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
1104                else
1105                        UASM_i_MFC0(p, ptr, C0_CONTEXT);
1106
1107                UASM_i_MFC0(p, tmp, C0_BADVADDR);
1108
1109                if (c0_scratch_reg >= 0)
1110                        UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1111                else
1112                        UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1113
1114                if (pgd_reg == -1)
1115                        /* Clear lower 23 bits of context. */
1116                        uasm_i_dins(p, ptr, 0, 0, 23);
1117
1118                uasm_il_bltz(p, r, tmp, label_vmalloc);
1119        }
1120
1121        if (pgd_reg == -1) {
1122                vmalloc_branch_delay_filled = 1;
1123                /* 1 0  1 0 1  << 6  xkphys cached */
1124                uasm_i_ori(p, ptr, ptr, 0x540);
1125                uasm_i_drotr(p, ptr, ptr, 11);
1126        }
1127
1128#ifdef __PAGETABLE_PMD_FOLDED
1129#define LOC_PTEP scratch
1130#else
1131#define LOC_PTEP ptr
1132#endif
1133
1134        if (!vmalloc_branch_delay_filled)
1135                /* get pgd offset in bytes */
1136                uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1137
1138        uasm_l_vmalloc_done(l, *p);
1139
1140        /*
1141         *                         tmp          ptr
1142         * fall-through case =   badvaddr  *pgd_current
1143         * vmalloc case      =   badvaddr  swapper_pg_dir
1144         */
1145
1146        if (vmalloc_branch_delay_filled)
1147                /* get pgd offset in bytes */
1148                uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1149
1150#ifdef __PAGETABLE_PMD_FOLDED
1151        GET_CONTEXT(p, tmp); /* get context reg */
1152#endif
1153        uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);
1154
1155        if (use_lwx_insns()) {
1156                UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
1157        } else {
1158                uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
1159                uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
1160        }
1161
1162#ifndef __PAGETABLE_PMD_FOLDED
1163        /* get pmd offset in bytes */
1164        uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
1165        uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
1166        GET_CONTEXT(p, tmp); /* get context reg */
1167
1168        if (use_lwx_insns()) {
1169                UASM_i_LWX(p, scratch, scratch, ptr);
1170        } else {
1171                uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
1172                UASM_i_LW(p, scratch, 0, ptr);
1173        }
1174#endif
1175        /* Adjust the context during the load latency. */
1176        build_adjust_context(p, tmp);
1177
1178#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1179        uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
1180        /*
1181         * The in the LWX case we don't want to do the load in the
1182         * delay slot.  It cannot issue in the same cycle and may be
1183         * speculative and unneeded.
1184         */
1185        if (use_lwx_insns())
1186                uasm_i_nop(p);
1187#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
1188
1189
1190        /* build_update_entries */
1191        if (use_lwx_insns()) {
1192                even = ptr;
1193                odd = tmp;
1194                UASM_i_LWX(p, even, scratch, tmp);
1195                UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
1196                UASM_i_LWX(p, odd, scratch, tmp);
1197        } else {
1198                UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
1199                even = tmp;
1200                odd = ptr;
1201                UASM_i_LW(p, even, 0, ptr); /* get even pte */
1202                UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
1203        }
1204        if (cpu_has_rixi) {
1205                uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
1206                UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1207                uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
1208        } else {
1209                uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
1210                UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1211                uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
1212        }
1213        UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
1214
1215        if (c0_scratch_reg >= 0) {
1216                UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1217                build_tlb_write_entry(p, l, r, tlb_random);
1218                uasm_l_leave(l, *p);
1219                rv.restore_scratch = 1;
1220        } else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13)  {
1221                build_tlb_write_entry(p, l, r, tlb_random);
1222                uasm_l_leave(l, *p);
1223                UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1224        } else {
1225                UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1226                build_tlb_write_entry(p, l, r, tlb_random);
1227                uasm_l_leave(l, *p);
1228                rv.restore_scratch = 1;
1229        }
1230
1231        uasm_i_eret(p); /* return from trap */
1232
1233        return rv;
1234}
1235
1236/*
1237 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
1238 * because EXL == 0.  If we wrap, we can also use the 32 instruction
1239 * slots before the XTLB refill exception handler which belong to the
1240 * unused TLB refill exception.
1241 */
1242#define MIPS64_REFILL_INSNS 32
1243
1244static void build_r4000_tlb_refill_handler(void)
1245{
1246        u32 *p = tlb_handler;
1247        struct uasm_label *l = labels;
1248        struct uasm_reloc *r = relocs;
1249        u32 *f;
1250        unsigned int final_len;
1251        struct mips_huge_tlb_info htlb_info __maybe_unused;
1252        enum vmalloc64_mode vmalloc_mode __maybe_unused;
1253
1254        memset(tlb_handler, 0, sizeof(tlb_handler));
1255        memset(labels, 0, sizeof(labels));
1256        memset(relocs, 0, sizeof(relocs));
1257        memset(final_handler, 0, sizeof(final_handler));
1258
1259        if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {
1260                htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
1261                                                          scratch_reg);
1262                vmalloc_mode = refill_scratch;
1263        } else {
1264                htlb_info.huge_pte = K0;
1265                htlb_info.restore_scratch = 0;
1266                vmalloc_mode = refill_noscratch;
1267                /*
1268                 * create the plain linear handler
1269                 */
1270                if (bcm1250_m3_war()) {
1271                        unsigned int segbits = 44;
1272
1273                        uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1274                        uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
1275                        uasm_i_xor(&p, K0, K0, K1);
1276                        uasm_i_dsrl_safe(&p, K1, K0, 62);
1277                        uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
1278                        uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
1279                        uasm_i_or(&p, K0, K0, K1);
1280                        uasm_il_bnez(&p, &r, K0, label_leave);
1281                        /* No need for uasm_i_nop */
1282                }
1283
1284#ifdef CONFIG_64BIT
1285                build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
1286#else
1287                build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
1288#endif
1289
1290#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1291                build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
1292#endif
1293
1294                build_get_ptep(&p, K0, K1);
1295                build_update_entries(&p, K0, K1);
1296                build_tlb_write_entry(&p, &l, &r, tlb_random);
1297                uasm_l_leave(&l, p);
1298                uasm_i_eret(&p); /* return from trap */
1299        }
1300#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1301        uasm_l_tlb_huge_update(&l, p);
1302        build_huge_update_entries(&p, htlb_info.huge_pte, K1);
1303        build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
1304                                   htlb_info.restore_scratch);
1305#endif
1306
1307#ifdef CONFIG_64BIT
1308        build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
1309#endif
1310
1311        /*
1312         * Overflow check: For the 64bit handler, we need at least one
1313         * free instruction slot for the wrap-around branch. In worst
1314         * case, if the intended insertion point is a delay slot, we
1315         * need three, with the second nop'ed and the third being
1316         * unused.
1317         */
1318        switch (boot_cpu_type()) {
1319        default:
1320                if (sizeof(long) == 4) {
1321        case CPU_LOONGSON2:
1322                /* Loongson2 ebase is different than r4k, we have more space */
1323                        if ((p - tlb_handler) > 64)
1324                                panic("TLB refill handler space exceeded");
1325                        /*
1326                         * Now fold the handler in the TLB refill handler space.
1327                         */
1328                        f = final_handler;
1329                        /* Simplest case, just copy the handler. */
1330                        uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1331                        final_len = p - tlb_handler;
1332                        break;
1333                } else {
1334                        if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
1335                            || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
1336                                && uasm_insn_has_bdelay(relocs,
1337                                                        tlb_handler + MIPS64_REFILL_INSNS - 3)))
1338                                panic("TLB refill handler space exceeded");
1339                        /*
1340                         * Now fold the handler in the TLB refill handler space.
1341                         */
1342                        f = final_handler + MIPS64_REFILL_INSNS;
1343                        if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
1344                                /* Just copy the handler. */
1345                                uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1346                                final_len = p - tlb_handler;
1347                        } else {
1348#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1349                                const enum label_id ls = label_tlb_huge_update;
1350#else
1351                                const enum label_id ls = label_vmalloc;
1352#endif
1353                                u32 *split;
1354                                int ov = 0;
1355                                int i;
1356
1357                                for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
1358                                        ;
1359                                BUG_ON(i == ARRAY_SIZE(labels));
1360                                split = labels[i].addr;
1361
1362                                /*
1363                                 * See if we have overflown one way or the other.
1364                                 */
1365                                if (split > tlb_handler + MIPS64_REFILL_INSNS ||
1366                                    split < p - MIPS64_REFILL_INSNS)
1367                                        ov = 1;
1368
1369                                if (ov) {
1370                                        /*
1371                                         * Split two instructions before the end.  One
1372                                         * for the branch and one for the instruction
1373                                         * in the delay slot.
1374                                         */
1375                                        split = tlb_handler + MIPS64_REFILL_INSNS - 2;
1376
1377                                        /*
1378                                         * If the branch would fall in a delay slot,
1379                                         * we must back up an additional instruction
1380                                         * so that it is no longer in a delay slot.
1381                                         */
1382                                        if (uasm_insn_has_bdelay(relocs, split - 1))
1383                                                split--;
1384                                }
1385                                /* Copy first part of the handler. */
1386                                uasm_copy_handler(relocs, labels, tlb_handler, split, f);
1387                                f += split - tlb_handler;
1388
1389                                if (ov) {
1390                                        /* Insert branch. */
1391                                        uasm_l_split(&l, final_handler);
1392                                        uasm_il_b(&f, &r, label_split);
1393                                        if (uasm_insn_has_bdelay(relocs, split))
1394                                                uasm_i_nop(&f);
1395                                        else {
1396                                                uasm_copy_handler(relocs, labels,
1397                                                                  split, split + 1, f);
1398                                                uasm_move_labels(labels, f, f + 1, -1);
1399                                                f++;
1400                                                split++;
1401                                        }
1402                                }
1403
1404                                /* Copy the rest of the handler. */
1405                                uasm_copy_handler(relocs, labels, split, p, final_handler);
1406                                final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
1407                                            (p - split);
1408                        }
1409                }
1410                break;
1411        }
1412
1413        uasm_resolve_relocs(relocs, labels);
1414        pr_debug("Wrote TLB refill handler (%u instructions).\n",
1415                 final_len);
1416
1417        memcpy((void *)ebase, final_handler, 0x100);
1418
1419        dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
1420}
1421
1422extern u32 handle_tlbl[], handle_tlbl_end[];
1423extern u32 handle_tlbs[], handle_tlbs_end[];
1424extern u32 handle_tlbm[], handle_tlbm_end[];
1425extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[];
1426extern u32 tlbmiss_handler_setup_pgd_end[];
1427
1428static void build_setup_pgd(void)
1429{
1430        const int a0 = 4;
1431        const int __maybe_unused a1 = 5;
1432        const int __maybe_unused a2 = 6;
1433        u32 *p = tlbmiss_handler_setup_pgd_start;
1434        const int tlbmiss_handler_setup_pgd_size =
1435                tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd_start;
1436#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
1437        long pgdc = (long)pgd_current;
1438#endif
1439
1440        memset(tlbmiss_handler_setup_pgd, 0, tlbmiss_handler_setup_pgd_size *
1441                                        sizeof(tlbmiss_handler_setup_pgd[0]));
1442        memset(labels, 0, sizeof(labels));
1443        memset(relocs, 0, sizeof(relocs));
1444        pgd_reg = allocate_kscratch();
1445#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
1446        if (pgd_reg == -1) {
1447                struct uasm_label *l = labels;
1448                struct uasm_reloc *r = relocs;
1449
1450                /* PGD << 11 in c0_Context */
1451                /*
1452                 * If it is a ckseg0 address, convert to a physical
1453                 * address.  Shifting right by 29 and adding 4 will
1454                 * result in zero for these addresses.
1455                 *
1456                 */
1457                UASM_i_SRA(&p, a1, a0, 29);
1458                UASM_i_ADDIU(&p, a1, a1, 4);
1459                uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
1460                uasm_i_nop(&p);
1461                uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
1462                uasm_l_tlbl_goaround1(&l, p);
1463                UASM_i_SLL(&p, a0, a0, 11);
1464                uasm_i_jr(&p, 31);
1465                UASM_i_MTC0(&p, a0, C0_CONTEXT);
1466        } else {
1467                /* PGD in c0_KScratch */
1468                uasm_i_jr(&p, 31);
1469                UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
1470        }
1471#else
1472#ifdef CONFIG_SMP
1473        /* Save PGD to pgd_current[smp_processor_id()] */
1474        UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG);
1475        UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT);
1476        UASM_i_LA_mostly(&p, a2, pgdc);
1477        UASM_i_ADDU(&p, a2, a2, a1);
1478        UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
1479#else
1480        UASM_i_LA_mostly(&p, a2, pgdc);
1481        UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
1482#endif /* SMP */
1483        uasm_i_jr(&p, 31);
1484
1485        /* if pgd_reg is allocated, save PGD also to scratch register */
1486        if (pgd_reg != -1)
1487                UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
1488        else
1489                uasm_i_nop(&p);
1490#endif
1491        if (p >= tlbmiss_handler_setup_pgd_end)
1492                panic("tlbmiss_handler_setup_pgd space exceeded");
1493
1494        uasm_resolve_relocs(relocs, labels);
1495        pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
1496                 (unsigned int)(p - tlbmiss_handler_setup_pgd));
1497
1498        dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,
1499                                        tlbmiss_handler_setup_pgd_size);
1500}
1501
1502static void
1503iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
1504{
1505#ifdef CONFIG_SMP
1506# ifdef CONFIG_64BIT_PHYS_ADDR
1507        if (cpu_has_64bits)
1508                uasm_i_lld(p, pte, 0, ptr);
1509        else
1510# endif
1511                UASM_i_LL(p, pte, 0, ptr);
1512#else
1513# ifdef CONFIG_64BIT_PHYS_ADDR
1514        if (cpu_has_64bits)
1515                uasm_i_ld(p, pte, 0, ptr);
1516        else
1517# endif
1518                UASM_i_LW(p, pte, 0, ptr);
1519#endif
1520}
1521
1522static void
1523iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
1524        unsigned int mode)
1525{
1526#ifdef CONFIG_64BIT_PHYS_ADDR
1527        unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
1528#endif
1529
1530        uasm_i_ori(p, pte, pte, mode);
1531#ifdef CONFIG_SMP
1532# ifdef CONFIG_64BIT_PHYS_ADDR
1533        if (cpu_has_64bits)
1534                uasm_i_scd(p, pte, 0, ptr);
1535        else
1536# endif
1537                UASM_i_SC(p, pte, 0, ptr);
1538
1539        if (r10000_llsc_war())
1540                uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
1541        else
1542                uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1543
1544# ifdef CONFIG_64BIT_PHYS_ADDR
1545        if (!cpu_has_64bits) {
1546                /* no uasm_i_nop needed */
1547                uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
1548                uasm_i_ori(p, pte, pte, hwmode);
1549                uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
1550                uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1551                /* no uasm_i_nop needed */
1552                uasm_i_lw(p, pte, 0, ptr);
1553        } else
1554                uasm_i_nop(p);
1555# else
1556        uasm_i_nop(p);
1557# endif
1558#else
1559# ifdef CONFIG_64BIT_PHYS_ADDR
1560        if (cpu_has_64bits)
1561                uasm_i_sd(p, pte, 0, ptr);
1562        else
1563# endif
1564                UASM_i_SW(p, pte, 0, ptr);
1565
1566# ifdef CONFIG_64BIT_PHYS_ADDR
1567        if (!cpu_has_64bits) {
1568                uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
1569                uasm_i_ori(p, pte, pte, hwmode);
1570                uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
1571                uasm_i_lw(p, pte, 0, ptr);
1572        }
1573# endif
1574#endif
1575}
1576
1577/*
1578 * Check if PTE is present, if not then jump to LABEL. PTR points to
1579 * the page table where this PTE is located, PTE will be re-loaded
1580 * with it's original value.
1581 */
1582static void
1583build_pte_present(u32 **p, struct uasm_reloc **r,
1584                  int pte, int ptr, int scratch, enum label_id lid)
1585{
1586        int t = scratch >= 0 ? scratch : pte;
1587
1588        if (cpu_has_rixi) {
1589                if (use_bbit_insns()) {
1590                        uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
1591                        uasm_i_nop(p);
1592                } else {
1593                        uasm_i_andi(p, t, pte, _PAGE_PRESENT);
1594                        uasm_il_beqz(p, r, t, lid);
1595                        if (pte == t)
1596                                /* You lose the SMP race :-(*/
1597                                iPTE_LW(p, pte, ptr);
1598                }
1599        } else {
1600                uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
1601                uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
1602                uasm_il_bnez(p, r, t, lid);
1603                if (pte == t)
1604                        /* You lose the SMP race :-(*/
1605                        iPTE_LW(p, pte, ptr);
1606        }
1607}
1608
1609/* Make PTE valid, store result in PTR. */
1610static void
1611build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
1612                 unsigned int ptr)
1613{
1614        unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
1615
1616        iPTE_SW(p, r, pte, ptr, mode);
1617}
1618
1619/*
1620 * Check if PTE can be written to, if not branch to LABEL. Regardless
1621 * restore PTE with value from PTR when done.
1622 */
1623static void
1624build_pte_writable(u32 **p, struct uasm_reloc **r,
1625                   unsigned int pte, unsigned int ptr, int scratch,
1626                   enum label_id lid)
1627{
1628        int t = scratch >= 0 ? scratch : pte;
1629
1630        uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
1631        uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
1632        uasm_il_bnez(p, r, t, lid);
1633        if (pte == t)
1634                /* You lose the SMP race :-(*/
1635                iPTE_LW(p, pte, ptr);
1636        else
1637                uasm_i_nop(p);
1638}
1639
1640/* Make PTE writable, update software status bits as well, then store
1641 * at PTR.
1642 */
1643static void
1644build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
1645                 unsigned int ptr)
1646{
1647        unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
1648                             | _PAGE_DIRTY);
1649
1650        iPTE_SW(p, r, pte, ptr, mode);
1651}
1652
1653/*
1654 * Check if PTE can be modified, if not branch to LABEL. Regardless
1655 * restore PTE with value from PTR when done.
1656 */
1657static void
1658build_pte_modifiable(u32 **p, struct uasm_reloc **r,
1659                     unsigned int pte, unsigned int ptr, int scratch,
1660                     enum label_id lid)
1661{
1662        if (use_bbit_insns()) {
1663                uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
1664                uasm_i_nop(p);
1665        } else {
1666                int t = scratch >= 0 ? scratch : pte;
1667                uasm_i_andi(p, t, pte, _PAGE_WRITE);
1668                uasm_il_beqz(p, r, t, lid);
1669                if (pte == t)
1670                        /* You lose the SMP race :-(*/
1671                        iPTE_LW(p, pte, ptr);
1672        }
1673}
1674
1675#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
1676
1677
1678/*
1679 * R3000 style TLB load/store/modify handlers.
1680 */
1681
1682/*
1683 * This places the pte into ENTRYLO0 and writes it with tlbwi.
1684 * Then it returns.
1685 */
1686static void
1687build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
1688{
1689        uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1690        uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
1691        uasm_i_tlbwi(p);
1692        uasm_i_jr(p, tmp);
1693        uasm_i_rfe(p); /* branch delay */
1694}
1695
1696/*
1697 * This places the pte into ENTRYLO0 and writes it with tlbwi
1698 * or tlbwr as appropriate.  This is because the index register
1699 * may have the probe fail bit set as a result of a trap on a
1700 * kseg2 access, i.e. without refill.  Then it returns.
1701 */
1702static void
1703build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
1704                             struct uasm_reloc **r, unsigned int pte,
1705                             unsigned int tmp)
1706{
1707        uasm_i_mfc0(p, tmp, C0_INDEX);
1708        uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1709        uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
1710        uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
1711        uasm_i_tlbwi(p); /* cp0 delay */
1712        uasm_i_jr(p, tmp);
1713        uasm_i_rfe(p); /* branch delay */
1714        uasm_l_r3000_write_probe_fail(l, *p);
1715        uasm_i_tlbwr(p); /* cp0 delay */
1716        uasm_i_jr(p, tmp);
1717        uasm_i_rfe(p); /* branch delay */
1718}
1719
1720static void
1721build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
1722                                   unsigned int ptr)
1723{
1724        long pgdc = (long)pgd_current;
1725
1726        uasm_i_mfc0(p, pte, C0_BADVADDR);
1727        uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
1728        uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
1729        uasm_i_srl(p, pte, pte, 22); /* load delay */
1730        uasm_i_sll(p, pte, pte, 2);
1731        uasm_i_addu(p, ptr, ptr, pte);
1732        uasm_i_mfc0(p, pte, C0_CONTEXT);
1733        uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
1734        uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
1735        uasm_i_addu(p, ptr, ptr, pte);
1736        uasm_i_lw(p, pte, 0, ptr);
1737        uasm_i_tlbp(p); /* load delay */
1738}
1739
1740static void build_r3000_tlb_load_handler(void)
1741{
1742        u32 *p = handle_tlbl;
1743        const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
1744        struct uasm_label *l = labels;
1745        struct uasm_reloc *r = relocs;
1746
1747        memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
1748        memset(labels, 0, sizeof(labels));
1749        memset(relocs, 0, sizeof(relocs));
1750
1751        build_r3000_tlbchange_handler_head(&p, K0, K1);
1752        build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
1753        uasm_i_nop(&p); /* load delay */
1754        build_make_valid(&p, &r, K0, K1);
1755        build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
1756
1757        uasm_l_nopage_tlbl(&l, p);
1758        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
1759        uasm_i_nop(&p);
1760
1761        if (p >= handle_tlbl_end)
1762                panic("TLB load handler fastpath space exceeded");
1763
1764        uasm_resolve_relocs(relocs, labels);
1765        pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
1766                 (unsigned int)(p - handle_tlbl));
1767
1768        dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size);
1769}
1770
1771static void build_r3000_tlb_store_handler(void)
1772{
1773        u32 *p = handle_tlbs;
1774        const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
1775        struct uasm_label *l = labels;
1776        struct uasm_reloc *r = relocs;
1777
1778        memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
1779        memset(labels, 0, sizeof(labels));
1780        memset(relocs, 0, sizeof(relocs));
1781
1782        build_r3000_tlbchange_handler_head(&p, K0, K1);
1783        build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
1784        uasm_i_nop(&p); /* load delay */
1785        build_make_write(&p, &r, K0, K1);
1786        build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
1787
1788        uasm_l_nopage_tlbs(&l, p);
1789        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1790        uasm_i_nop(&p);
1791
1792        if (p >= handle_tlbs_end)
1793                panic("TLB store handler fastpath space exceeded");
1794
1795        uasm_resolve_relocs(relocs, labels);
1796        pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
1797                 (unsigned int)(p - handle_tlbs));
1798
1799        dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size);
1800}
1801
1802static void build_r3000_tlb_modify_handler(void)
1803{
1804        u32 *p = handle_tlbm;
1805        const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
1806        struct uasm_label *l = labels;
1807        struct uasm_reloc *r = relocs;
1808
1809        memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
1810        memset(labels, 0, sizeof(labels));
1811        memset(relocs, 0, sizeof(relocs));
1812
1813        build_r3000_tlbchange_handler_head(&p, K0, K1);
1814        build_pte_modifiable(&p, &r, K0, K1,  -1, label_nopage_tlbm);
1815        uasm_i_nop(&p); /* load delay */
1816        build_make_write(&p, &r, K0, K1);
1817        build_r3000_pte_reload_tlbwi(&p, K0, K1);
1818
1819        uasm_l_nopage_tlbm(&l, p);
1820        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1821        uasm_i_nop(&p);
1822
1823        if (p >= handle_tlbm_end)
1824                panic("TLB modify handler fastpath space exceeded");
1825
1826        uasm_resolve_relocs(relocs, labels);
1827        pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
1828                 (unsigned int)(p - handle_tlbm));
1829
1830        dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_size);
1831}
1832#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
1833
1834/*
1835 * R4000 style TLB load/store/modify handlers.
1836 */
1837static struct work_registers
1838build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
1839                                   struct uasm_reloc **r)
1840{
1841        struct work_registers wr = build_get_work_registers(p);
1842
1843#ifdef CONFIG_64BIT
1844        build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
1845#else
1846        build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
1847#endif
1848
1849#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1850        /*
1851         * For huge tlb entries, pmd doesn't contain an address but
1852         * instead contains the tlb pte. Check the PAGE_HUGE bit and
1853         * see if we need to jump to huge tlb processing.
1854         */
1855        build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
1856#endif
1857
1858        UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
1859        UASM_i_LW(p, wr.r2, 0, wr.r2);
1860        UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
1861        uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
1862        UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
1863
1864#ifdef CONFIG_SMP
1865        uasm_l_smp_pgtable_change(l, *p);
1866#endif
1867        iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
1868        if (!m4kc_tlbp_war())
1869                build_tlb_probe_entry(p);
1870        return wr;
1871}
1872
1873static void
1874build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
1875                                   struct uasm_reloc **r, unsigned int tmp,
1876                                   unsigned int ptr)
1877{
1878        uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
1879        uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
1880        build_update_entries(p, tmp, ptr);
1881        build_tlb_write_entry(p, l, r, tlb_indexed);
1882        uasm_l_leave(l, *p);
1883        build_restore_work_registers(p);
1884        uasm_i_eret(p); /* return from trap */
1885
1886#ifdef CONFIG_64BIT
1887        build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
1888#endif
1889}
1890
1891static void build_r4000_tlb_load_handler(void)
1892{
1893        u32 *p = handle_tlbl;
1894        const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
1895        struct uasm_label *l = labels;
1896        struct uasm_reloc *r = relocs;
1897        struct work_registers wr;
1898
1899        memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
1900        memset(labels, 0, sizeof(labels));
1901        memset(relocs, 0, sizeof(relocs));
1902
1903        if (bcm1250_m3_war()) {
1904                unsigned int segbits = 44;
1905
1906                uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1907                uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
1908                uasm_i_xor(&p, K0, K0, K1);
1909                uasm_i_dsrl_safe(&p, K1, K0, 62);
1910                uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
1911                uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
1912                uasm_i_or(&p, K0, K0, K1);
1913                uasm_il_bnez(&p, &r, K0, label_leave);
1914                /* No need for uasm_i_nop */
1915        }
1916
1917        wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
1918        build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
1919        if (m4kc_tlbp_war())
1920                build_tlb_probe_entry(&p);
1921
1922        if (cpu_has_rixi) {
1923                /*
1924                 * If the page is not _PAGE_VALID, RI or XI could not
1925                 * have triggered it.  Skip the expensive test..
1926                 */
1927                if (use_bbit_insns()) {
1928                        uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
1929                                      label_tlbl_goaround1);
1930                } else {
1931                        uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
1932                        uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
1933                }
1934                uasm_i_nop(&p);
1935
1936                uasm_i_tlbr(&p);
1937
1938                switch (current_cpu_type()) {
1939                default:
1940                        if (cpu_has_mips_r2) {
1941                                uasm_i_ehb(&p);
1942
1943                case CPU_CAVIUM_OCTEON:
1944                case CPU_CAVIUM_OCTEON_PLUS:
1945                case CPU_CAVIUM_OCTEON2:
1946                                break;
1947                        }
1948                }
1949
1950                /* Examine  entrylo 0 or 1 based on ptr. */
1951                if (use_bbit_insns()) {
1952                        uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
1953                } else {
1954                        uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
1955                        uasm_i_beqz(&p, wr.r3, 8);
1956                }
1957                /* load it in the delay slot*/
1958                UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
1959                /* load it if ptr is odd */
1960                UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
1961                /*
1962                 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
1963                 * XI must have triggered it.
1964                 */
1965                if (use_bbit_insns()) {
1966                        uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
1967                        uasm_i_nop(&p);
1968                        uasm_l_tlbl_goaround1(&l, p);
1969                } else {
1970                        uasm_i_andi(&p, wr.r3, wr.r3, 2);
1971                        uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
1972                        uasm_i_nop(&p);
1973                }
1974                uasm_l_tlbl_goaround1(&l, p);
1975        }
1976        build_make_valid(&p, &r, wr.r1, wr.r2);
1977        build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
1978
1979#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1980        /*
1981         * This is the entry point when build_r4000_tlbchange_handler_head
1982         * spots a huge page.
1983         */
1984        uasm_l_tlb_huge_update(&l, p);
1985        iPTE_LW(&p, wr.r1, wr.r2);
1986        build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
1987        build_tlb_probe_entry(&p);
1988
1989        if (cpu_has_rixi) {
1990                /*
1991                 * If the page is not _PAGE_VALID, RI or XI could not
1992                 * have triggered it.  Skip the expensive test..
1993                 */
1994                if (use_bbit_insns()) {
1995                        uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
1996                                      label_tlbl_goaround2);
1997                } else {
1998                        uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
1999                        uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
2000                }
2001                uasm_i_nop(&p);
2002
2003                uasm_i_tlbr(&p);
2004
2005                switch (current_cpu_type()) {
2006                default:
2007                        if (cpu_has_mips_r2) {
2008                                uasm_i_ehb(&p);
2009
2010                case CPU_CAVIUM_OCTEON:
2011                case CPU_CAVIUM_OCTEON_PLUS:
2012                case CPU_CAVIUM_OCTEON2:
2013                                break;
2014                        }
2015                }
2016
2017                /* Examine  entrylo 0 or 1 based on ptr. */
2018                if (use_bbit_insns()) {
2019                        uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
2020                } else {
2021                        uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
2022                        uasm_i_beqz(&p, wr.r3, 8);
2023                }
2024                /* load it in the delay slot*/
2025                UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
2026                /* load it if ptr is odd */
2027                UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
2028                /*
2029                 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
2030                 * XI must have triggered it.
2031                 */
2032                if (use_bbit_insns()) {
2033                        uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
2034                } else {
2035                        uasm_i_andi(&p, wr.r3, wr.r3, 2);
2036                        uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
2037                }
2038                if (PM_DEFAULT_MASK == 0)
2039                        uasm_i_nop(&p);
2040                /*
2041                 * We clobbered C0_PAGEMASK, restore it.  On the other branch
2042                 * it is restored in build_huge_tlb_write_entry.
2043                 */
2044                build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
2045
2046                uasm_l_tlbl_goaround2(&l, p);
2047        }
2048        uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
2049        build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
2050#endif
2051
2052        uasm_l_nopage_tlbl(&l, p);
2053        build_restore_work_registers(&p);
2054#ifdef CONFIG_CPU_MICROMIPS
2055        if ((unsigned long)tlb_do_page_fault_0 & 1) {
2056                uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
2057                uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
2058                uasm_i_jr(&p, K0);
2059        } else
2060#endif
2061        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
2062        uasm_i_nop(&p);
2063
2064        if (p >= handle_tlbl_end)
2065                panic("TLB load handler fastpath space exceeded");
2066
2067        uasm_resolve_relocs(relocs, labels);
2068        pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
2069                 (unsigned int)(p - handle_tlbl));
2070
2071        dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
2072}
2073
2074static void build_r4000_tlb_store_handler(void)
2075{
2076        u32 *p = handle_tlbs;
2077        const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
2078        struct uasm_label *l = labels;
2079        struct uasm_reloc *r = relocs;
2080        struct work_registers wr;
2081
2082        memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
2083        memset(labels, 0, sizeof(labels));
2084        memset(relocs, 0, sizeof(relocs));
2085
2086        wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2087        build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
2088        if (m4kc_tlbp_war())
2089                build_tlb_probe_entry(&p);
2090        build_make_write(&p, &r, wr.r1, wr.r2);
2091        build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2092
2093#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2094        /*
2095         * This is the entry point when
2096         * build_r4000_tlbchange_handler_head spots a huge page.
2097         */
2098        uasm_l_tlb_huge_update(&l, p);
2099        iPTE_LW(&p, wr.r1, wr.r2);
2100        build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
2101        build_tlb_probe_entry(&p);
2102        uasm_i_ori(&p, wr.r1, wr.r1,
2103                   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2104        build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
2105#endif
2106
2107        uasm_l_nopage_tlbs(&l, p);
2108        build_restore_work_registers(&p);
2109#ifdef CONFIG_CPU_MICROMIPS
2110        if ((unsigned long)tlb_do_page_fault_1 & 1) {
2111                uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2112                uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2113                uasm_i_jr(&p, K0);
2114        } else
2115#endif
2116        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2117        uasm_i_nop(&p);
2118
2119        if (p >= handle_tlbs_end)
2120                panic("TLB store handler fastpath space exceeded");
2121
2122        uasm_resolve_relocs(relocs, labels);
2123        pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
2124                 (unsigned int)(p - handle_tlbs));
2125
2126        dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
2127}
2128
2129static void build_r4000_tlb_modify_handler(void)
2130{
2131        u32 *p = handle_tlbm;
2132        const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
2133        struct uasm_label *l = labels;
2134        struct uasm_reloc *r = relocs;
2135        struct work_registers wr;
2136
2137        memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
2138        memset(labels, 0, sizeof(labels));
2139        memset(relocs, 0, sizeof(relocs));
2140
2141        wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2142        build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
2143        if (m4kc_tlbp_war())
2144                build_tlb_probe_entry(&p);
2145        /* Present and writable bits set, set accessed and dirty bits. */
2146        build_make_write(&p, &r, wr.r1, wr.r2);
2147        build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2148
2149#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2150        /*
2151         * This is the entry point when
2152         * build_r4000_tlbchange_handler_head spots a huge page.
2153         */
2154        uasm_l_tlb_huge_update(&l, p);
2155        iPTE_LW(&p, wr.r1, wr.r2);
2156        build_pte_modifiable(&p, &r, wr.r1, wr.r2,  wr.r3, label_nopage_tlbm);
2157        build_tlb_probe_entry(&p);
2158        uasm_i_ori(&p, wr.r1, wr.r1,
2159                   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2160        build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
2161#endif
2162
2163        uasm_l_nopage_tlbm(&l, p);
2164        build_restore_work_registers(&p);
2165#ifdef CONFIG_CPU_MICROMIPS
2166        if ((unsigned long)tlb_do_page_fault_1 & 1) {
2167                uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2168                uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2169                uasm_i_jr(&p, K0);
2170        } else
2171#endif
2172        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2173        uasm_i_nop(&p);
2174
2175        if (p >= handle_tlbm_end)
2176                panic("TLB modify handler fastpath space exceeded");
2177
2178        uasm_resolve_relocs(relocs, labels);
2179        pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
2180                 (unsigned int)(p - handle_tlbm));
2181
2182        dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
2183}
2184
2185static void flush_tlb_handlers(void)
2186{
2187        local_flush_icache_range((unsigned long)handle_tlbl,
2188                           (unsigned long)handle_tlbl_end);
2189        local_flush_icache_range((unsigned long)handle_tlbs,
2190                           (unsigned long)handle_tlbs_end);
2191        local_flush_icache_range((unsigned long)handle_tlbm,
2192                           (unsigned long)handle_tlbm_end);
2193        local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
2194                           (unsigned long)tlbmiss_handler_setup_pgd_end);
2195}
2196
2197void build_tlb_refill_handler(void)
2198{
2199        /*
2200         * The refill handler is generated per-CPU, multi-node systems
2201         * may have local storage for it. The other handlers are only
2202         * needed once.
2203         */
2204        static int run_once = 0;
2205
2206        output_pgtable_bits_defines();
2207
2208#ifdef CONFIG_64BIT
2209        check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
2210#endif
2211
2212        switch (current_cpu_type()) {
2213        case CPU_R2000:
2214        case CPU_R3000:
2215        case CPU_R3000A:
2216        case CPU_R3081E:
2217        case CPU_TX3912:
2218        case CPU_TX3922:
2219        case CPU_TX3927:
2220#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
2221                if (cpu_has_local_ebase)
2222                        build_r3000_tlb_refill_handler();
2223                if (!run_once) {
2224                        if (!cpu_has_local_ebase)
2225                                build_r3000_tlb_refill_handler();
2226                        build_setup_pgd();
2227                        build_r3000_tlb_load_handler();
2228                        build_r3000_tlb_store_handler();
2229                        build_r3000_tlb_modify_handler();
2230                        flush_tlb_handlers();
2231                        run_once++;
2232                }
2233#else
2234                panic("No R3000 TLB refill handler");
2235#endif
2236                break;
2237
2238        case CPU_R6000:
2239        case CPU_R6000A:
2240                panic("No R6000 TLB refill handler yet");
2241                break;
2242
2243        case CPU_R8000:
2244                panic("No R8000 TLB refill handler yet");
2245                break;
2246
2247        default:
2248                if (!run_once) {
2249                        scratch_reg = allocate_kscratch();
2250                        build_setup_pgd();
2251                        build_r4000_tlb_load_handler();
2252                        build_r4000_tlb_store_handler();
2253                        build_r4000_tlb_modify_handler();
2254                        if (!cpu_has_local_ebase)
2255                                build_r4000_tlb_refill_handler();
2256                        flush_tlb_handlers();
2257                        run_once++;
2258                }
2259                if (cpu_has_local_ebase)
2260                        build_r4000_tlb_refill_handler();
2261        }
2262}
2263