linux/arch/powerpc/mm/tlb_nohash_low.S
/*
 * This file contains low-level functions for performing various
 * types of TLB invalidations on various processors with no hash
 * table.
 *
 * This file implements the following functions for all no-hash
 * processors. Some aren't implemented for some variants. Some
 * are inline in tlbflush.h
 *
 *      - tlbil_va
 *      - tlbil_pid
 *      - tlbil_all
 *      - tlbivax_bcast
 *
 * Code mostly moved over from misc_32.S
 *
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Partially rewritten by Cort Dougan (cort@cs.nmt.edu)
 * Paul Mackerras, Kumar Gala and Benjamin Herrenschmidt.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/bug.h>

#if defined(CONFIG_40x)

/*
 * 40x implementation needs only tlbil_va
 */
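/*
 * Calling convention, as used by the code below: r3 = effective address
 * to invalidate, r4 = PID of the mapping (temporarily loaded into
 * SPRN_PID so the tlbsx. search matches the right context).
 */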
_GLOBAL(__tlbil_va)
        /* We run the search with interrupts disabled because we have to
         * change the PID and we don't want to be preempted while the
         * wrong PID is loaded.
         */
        mfmsr   r5
        mfspr   r6,SPRN_PID
        wrteei  0
        mtspr   SPRN_PID,r4
        tlbsx.  r3, 0, r3
        mtspr   SPRN_PID,r6
        wrtee   r5
        bne     1f
        sync
        /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is
         * clear. Since 25 is the V bit in the TLB_TAG, loading this value
         * will invalidate the TLB entry. */
        tlbwe   r3, r3, TLB_TAG
        isync
1:      blr

#elif defined(CONFIG_8xx)

/*
 * Nothing to do for 8xx, everything is inline
 */

#elif defined(CONFIG_44x) /* Includes 47x */

/*
 * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
 * of the TLB for everything else.
 */
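/*
 * r3 = effective address to invalidate, r4 = PID/STID of the mapping
 * (merged into the low 16 bits of MMUCR before the tlbsx. search).
 */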
_GLOBAL(__tlbil_va)
        mfspr   r5,SPRN_MMUCR
        mfmsr   r10

        /*
         * We write 16 bits of STID since 47x supports that much; we
         * should never be passed out-of-bounds values on 440 (hopefully).
         */
        rlwimi  r5,r4,0,16,31

        /* We have to run the search with interrupts disabled, otherwise
         * an interrupt which causes a TLB miss can clobber the MMUCR
         * between the mtspr and the tlbsx.
         *
         * Critical and Machine Check interrupts take care of saving
         * and restoring MMUCR, so only normal interrupts have to be
         * taken care of.
         */
        wrteei  0
        mtspr   SPRN_MMUCR,r5
        tlbsx.  r6,0,r3
        bne     10f
        sync
BEGIN_MMU_FTR_SECTION
        b       2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
        /* On 440 there are only 64 TLB entries, so r6 < 64, which means
         * bit 22 is clear. Since 22 is the V bit in the TLB_PAGEID, loading
         * this value will invalidate the TLB entry.
         */
        tlbwe   r6,r6,PPC44x_TLB_PAGEID
        isync
10:     wrtee   r10
        blr
2:
#ifdef CONFIG_PPC_47x
        oris    r7,r6,0x8000    /* specify way explicitly */
        clrrwi  r4,r3,12        /* get an EPN for the hashing with V = 0 */
        ori     r4,r4,PPC47x_TLBE_SIZE
        tlbwe   r4,r7,0         /* write it */
        isync
        wrtee   r10
        blr
#else /* CONFIG_PPC_47x */
1:      trap
        EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
#endif /* !CONFIG_PPC_47x */

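/*
 * On 440, _tlbil_pid and _tlbil_all share the same implementation: a
 * full sweep of all TLB entries up to tlb_44x_hwater, so the PID
 * argument is effectively ignored. 47x instead walks the TLB by set
 * and way, skipping entries recorded as bolted in tlb_47x_boltmap.
 */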
_GLOBAL(_tlbil_all)
_GLOBAL(_tlbil_pid)
BEGIN_MMU_FTR_SECTION
        b       2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
        li      r3,0
        sync

        /* Load high watermark */
        lis     r4,tlb_44x_hwater@ha
        lwz     r5,tlb_44x_hwater@l(r4)

1:      tlbwe   r3,r3,PPC44x_TLB_PAGEID
        addi    r3,r3,1
        cmpw    0,r3,r5
        ble     1b

        isync
        blr
2:
#ifdef CONFIG_PPC_47x
        /* 476 variant. There's no simple way to do this; hopefully we
         * can limit how often such full invalidations are needed.
         */
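        /*
         * The loop below walks all 256 congruence sets and the 4 ways of
         * each set, reading each entry back with tlbre and clearing its
         * valid bit with tlbwe. tlb_47x_boltmap holds one bit per set;
         * when the bit is set, way 0 of that set is treated as bolted
         * and is left alone.
         */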
        mfmsr   r11             /* Interrupts off */
        wrteei  0
        li      r3,-1           /* Current set */
        lis     r10,tlb_47x_boltmap@h
        ori     r10,r10,tlb_47x_boltmap@l
        lis     r7,0x8000       /* Specify way explicitly */

        b       9f              /* For each set */

1:      li      r9,4            /* Number of ways */
        li      r4,0            /* Current way */
        li      r6,0            /* Default entry value 0 */
        andi.   r0,r8,1         /* Check if way 0 is bolted */
        mtctr   r9              /* Load way counter */
        bne-    3f              /* Bolted, skip loading it */

2:      /* For each way */
        or      r5,r3,r4        /* Make way|index for tlbre */
        rlwimi  r5,r5,16,8,15   /* Copy index into position */
        tlbre   r6,r5,0         /* Read entry */
3:      addis   r4,r4,0x2000    /* Next way */
        andi.   r0,r6,PPC47x_TLB0_VALID /* Valid entry ? */
        beq     4f              /* Nope, skip it */
        rlwimi  r7,r5,0,1,2     /* Insert way number */
        rlwinm  r6,r6,0,21,19   /* Clear V */
        tlbwe   r6,r7,0         /* Write it */
4:      bdnz    2b              /* Loop for each way */
        srwi    r8,r8,1         /* Next boltmap bit */
9:      cmpwi   cr1,r3,255      /* Last set done ? */
        addi    r3,r3,1         /* Next set */
        beq     cr1,1f          /* End of loop */
        andi.   r0,r3,0x1f      /* Need to load a new boltmap word ? */
        bne     1b              /* No, loop */
        lwz     r8,0(r10)       /* Load boltmap entry */
        addi    r10,r10,4       /* Next word */
        b       1b              /* Then loop */
1:      isync                   /* Sync shadows */
        wrtee   r11
#else /* CONFIG_PPC_47x */
1:      trap
        EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
#endif /* !CONFIG_PPC_47x */
        blr

#ifdef CONFIG_PPC_47x

/*
 * 47x variant of icbt
 */
# define ICBT(CT,RA,RB) \
        .long   0x7c00002c | ((CT) << 21) | ((RA) << 16) | ((RB) << 11)
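/*
 * The macro above hand-encodes icbt CT,RA,RB as a raw .long, like the
 * tlbivax encoding further down, presumably so the file still assembles
 * with binutils that lack the 476 form of the instruction.
 */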

/*
 * _tlbivax_bcast is only on 47x. We don't bother doing a runtime
 * check, though; it will blow up soon enough if we mistakenly try
 * to use it on a 440.
 */
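/*
 * r3 = effective address to broadcast-invalidate, r4 = PID/STID
 * (merged into MMUCR before the tlbivax).
 */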
_GLOBAL(_tlbivax_bcast)
        mfspr   r5,SPRN_MMUCR
        mfmsr   r10
        rlwimi  r5,r4,0,16,31
        wrteei  0
        mtspr   SPRN_MMUCR,r5
        isync
/*      tlbivax 0,r3 - use .long to avoid binutils deps */
        .long 0x7c000624 | (r3 << 11)
        isync
        eieio
        tlbsync
BEGIN_FTR_SECTION
        b       1f
END_FTR_SECTION_IFSET(CPU_FTR_476_DD2)
        sync
        wrtee   r10
        blr
/*
 * DD2 HW could hang if an instruction fetch happens before the msync
 * completes. Touch enough instruction cache lines to ensure cache hits.
 */
1:      mflr    r9
        bl      2f
2:      mflr    r6
        li      r7,32
        ICBT(0,r6,r7)           /* touch next cache line */
        add     r6,r6,r7
        ICBT(0,r6,r7)           /* touch next cache line */
        add     r6,r6,r7
        ICBT(0,r6,r7)           /* touch next cache line */
        sync
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        mtlr    r9
        wrtee   r10
        blr
#endif /* CONFIG_PPC_47x */

#elif defined(CONFIG_FSL_BOOKE)
/*
 * FSL BookE implementations.
 *
 * Since the feature sections use _SECTION_ELSE, the larger code path
 * must come before the _SECTION_ELSE.
 */

/*
 * Flush MMU TLB on the local processor
 */
_GLOBAL(_tlbil_all)
BEGIN_MMU_FTR_SECTION
        li      r3,(MMUCSR0_TLBFI)@l
        mtspr   SPRN_MMUCSR0, r3
1:
        mfspr   r3,SPRN_MMUCSR0
        andi.   r3,r3,MMUCSR0_TLBFI@l
        bne     1b
MMU_FTR_SECTION_ELSE
        PPC_TLBILX_ALL(0,R0)
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
        msync
        isync
        blr

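/*
 * _tlbil_pid: r3 = PID. Uses tlbilx when MMU_FTR_USE_TLBILX is set,
 * otherwise falls back to a full TLB flash invalidate via MMUCSR0.
 */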
_GLOBAL(_tlbil_pid)
BEGIN_MMU_FTR_SECTION
        slwi    r3,r3,16
        mfmsr   r10
        wrteei  0
        mfspr   r4,SPRN_MAS6    /* save MAS6 */
        mtspr   SPRN_MAS6,r3
        PPC_TLBILX_PID(0,R0)
        mtspr   SPRN_MAS6,r4    /* restore MAS6 */
        wrtee   r10
MMU_FTR_SECTION_ELSE
        li      r3,(MMUCSR0_TLBFI)@l
        mtspr   SPRN_MMUCSR0, r3
1:
        mfspr   r3,SPRN_MMUCSR0
        andi.   r3,r3,MMUCSR0_TLBFI@l
        bne     1b
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBILX)
        msync
        isync
        blr

/*
 * Flush MMU TLB for a particular address, but only on the local processor
 * (no broadcast)
 */
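/*
 * r3 = effective address, r4 = PID (shifted into the MAS6 SPID field for
 * the lookup). The search assumes a 4K page size and address space 0.
 */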
_GLOBAL(__tlbil_va)
        mfmsr   r10
        wrteei  0
        slwi    r4,r4,16
        ori     r4,r4,(MAS6_ISIZE(BOOK3E_PAGESZ_4K))@l
        mtspr   SPRN_MAS6,r4            /* assume AS=0 for now */
BEGIN_MMU_FTR_SECTION
        tlbsx   0,r3
        mfspr   r4,SPRN_MAS1            /* check valid */
        andis.  r3,r4,MAS1_VALID@h
        beq     1f
        rlwinm  r4,r4,0,1,31
        mtspr   SPRN_MAS1,r4
        tlbwe
MMU_FTR_SECTION_ELSE
        PPC_TLBILX_VA(0,R3)
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
        msync
        isync
1:      wrtee   r10
        blr
#elif defined(CONFIG_PPC_BOOK3E)
/*
 * New Book3E (>= 2.06) implementation
 *
 * Note: We may be able to get away without the interrupt masking stuff
 * if we save/restore MAS6 on exceptions that might modify it
 */
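/*
 * Register conventions for the entry points below: _tlbil_pid and
 * _tlbil_pid_noind take the PID in r3; _tlbil_va and _tlbivax_bcast
 * take the effective address in r3, the PID in r4, the page size
 * (tsize) in r5 and an "indirect entry" flag in r6.
 */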
_GLOBAL(_tlbil_pid)
        slwi    r4,r3,MAS6_SPID_SHIFT
        mfmsr   r10
        wrteei  0
        mtspr   SPRN_MAS6,r4
        PPC_TLBILX_PID(0,R0)
        wrtee   r10
        msync
        isync
        blr

_GLOBAL(_tlbil_pid_noind)
        slwi    r4,r3,MAS6_SPID_SHIFT
        mfmsr   r10
        ori     r4,r4,MAS6_SIND
        wrteei  0
        mtspr   SPRN_MAS6,r4
        PPC_TLBILX_PID(0,R0)
        wrtee   r10
        msync
        isync
        blr

_GLOBAL(_tlbil_all)
        PPC_TLBILX_ALL(0,R0)
        msync
        isync
        blr

_GLOBAL(_tlbil_va)
        mfmsr   r10
        wrteei  0
        cmpwi   cr0,r6,0
        slwi    r4,r4,MAS6_SPID_SHIFT
        rlwimi  r4,r5,MAS6_ISIZE_SHIFT,MAS6_ISIZE_MASK
        beq     1f
        rlwimi  r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
1:      mtspr   SPRN_MAS6,r4            /* assume AS=0 for now */
        PPC_TLBILX_VA(0,R3)
        msync
        isync
        wrtee   r10
        blr

_GLOBAL(_tlbivax_bcast)
        mfmsr   r10
        wrteei  0
        cmpwi   cr0,r6,0
        slwi    r4,r4,MAS6_SPID_SHIFT
        rlwimi  r4,r5,MAS6_ISIZE_SHIFT,MAS6_ISIZE_MASK
        beq     1f
        rlwimi  r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
1:      mtspr   SPRN_MAS6,r4            /* assume AS=0 for now */
        PPC_TLBIVAX(0,R3)
        eieio
        tlbsync
        sync
        wrtee   r10
        blr

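/*
 * set_context: r3 = new context value (written to SPRN_PID), r4 =
 * pointer to the new pgdir (only stored for the Abatron BDI2000
 * support below).
 */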
_GLOBAL(set_context)
#ifdef CONFIG_BDI_SWITCH
        /* Context switch the PTE pointer for the Abatron BDI2000.
         * The PGDIR is the second parameter.
         */
        lis     r5, abatron_pteptrs@h
        ori     r5, r5, abatron_pteptrs@l
        stw     r4, 0x4(r5)
#endif
        mtspr   SPRN_PID,r3
        isync                   /* Force context change */
        blr
#else
#error Unsupported processor type!
#endif

#if defined(CONFIG_PPC_FSL_BOOK3E)
/*
 * extern void loadcam_entry(unsigned int index)
 *
 * Load TLBCAM[index] entry into the L2 CAM MMU
 */
_GLOBAL(loadcam_entry)
        LOAD_REG_ADDR(r4, TLBCAM)
        mulli   r5,r3,TLBCAM_SIZE
        add     r3,r5,r4
        lwz     r4,TLBCAM_MAS0(r3)
        mtspr   SPRN_MAS0,r4
        lwz     r4,TLBCAM_MAS1(r3)
        mtspr   SPRN_MAS1,r4
        PPC_LL  r4,TLBCAM_MAS2(r3)
        mtspr   SPRN_MAS2,r4
        lwz     r4,TLBCAM_MAS3(r3)
        mtspr   SPRN_MAS3,r4
BEGIN_MMU_FTR_SECTION
        lwz     r4,TLBCAM_MAS7(r3)
        mtspr   SPRN_MAS7,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
        isync
        tlbwe
        isync
        blr
#endif