linux/arch/powerpc/mm/hash_low_32.S
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  This file contains low-level assembler routines for managing
 *  the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 *  hash table, so this file is not used on them.)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>

#ifdef CONFIG_SMP
        .section .bss
        .align  2
        .globl mmu_hash_lock
mmu_hash_lock:
        .space  4
EXPORT_SYMBOL(mmu_hash_lock)
#endif /* CONFIG_SMP */

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG_THREAD contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r8, r10, ctr, lr.
 */
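/*
 * Roughly, the flow below corresponds to the following C-like pseudocode
 * (an illustrative sketch only, not the kernel's actual C code; helper
 * names such as pte_offset() are approximations):
 *
 *	pgd = (addr < KERNELBASE) ? current->thread.pgdir : swapper_pg_dir;
 *	access |= _PAGE_PRESENT | _PAGE_USER;
 *	if (addr >= KERNELBASE && !(srr1 & MSR_PR))
 *		access &= ~_PAGE_USER;	// kernel-mode access to a kernel address
 *	pmd = pgd[addr >> PGDIR_SHIFT];
 *	if (pmd_none(pmd))
 *		return;			// no mapping: back to the caller
 *	pte = pte_offset(pmd, addr);
 *	... check permissions, set ACCESSED/DIRTY/HASHPTE, call create_hpte ...
 */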
        .text
_GLOBAL(hash_page)
        tophys(r7,0)                    /* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP
        addis   r8,r7,mmu_hash_lock@h
        ori     r8,r8,mmu_hash_lock@l
        lis     r0,0x0fff
        b       10f
11:     lwz     r6,0(r8)
        cmpwi   0,r6,0
        bne     11b
10:     lwarx   r6,0,r8
        cmpwi   0,r6,0
        bne-    11b
        stwcx.  r0,0,r8
        bne-    10b
        isync
#endif
        /* Get PTE (linux-style) and check access */
        lis     r0,KERNELBASE@h         /* check if kernel address */
        cmplw   0,r4,r0
        mfspr   r8,SPRN_SPRG_THREAD     /* current task's THREAD (phys) */
        ori     r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
        lwz     r5,PGDIR(r8)            /* virt page-table root */
        blt+    112f                    /* assume user more likely */
        lis     r5,swapper_pg_dir@ha    /* if kernel address, use */
        addi    r5,r5,swapper_pg_dir@l  /* kernel page table */
        rlwimi  r3,r9,32-12,29,29       /* MSR_PR -> _PAGE_USER */
112:    add     r5,r5,r7                /* convert to phys addr */
#ifndef CONFIG_PTE_64BIT
        rlwimi  r5,r4,12,20,29          /* insert top 10 bits of address */
        lwz     r8,0(r5)                /* get pmd entry */
        rlwinm. r8,r8,0,0,19            /* extract address of pte page */
#else
        rlwinm  r8,r4,13,19,29          /* Compute pgdir/pmd offset */
        lwzx    r8,r8,r5                /* Get L1 entry */
        rlwinm. r8,r8,0,0,20            /* extract pt base address */
#endif
#ifdef CONFIG_SMP
        beq-    hash_page_out           /* return if no mapping */
#else
        /* XXX it seems like the 601 will give a machine fault on the
           rfi if its alignment is wrong (bottom 4 bits of address are
           8 or 0xc) and we have had a not-taken conditional branch
           to the address following the rfi. */
        beqlr-
#endif
#ifndef CONFIG_PTE_64BIT
        rlwimi  r8,r4,22,20,29          /* insert next 10 bits of address */
#else
        rlwimi  r8,r4,23,20,28          /* compute pte address */
#endif
        rlwinm  r0,r3,32-3,24,24        /* _PAGE_RW access -> _PAGE_DIRTY */
        ori     r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

        /*
         * Update the linux PTE atomically.  We do the lwarx up-front
         * because almost always, there won't be a permission violation
         * and there won't already be an HPTE, and thus we will have
         * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
         *
         * If PTE_64BIT is set, the low word is the flags word; use that
         * word for locking since it contains all the interesting bits.
         */
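        /*
         * In C-like pseudocode, the lwarx/stwcx. sequence below amounts to
         * roughly the following (a sketch; "cmpxchg" here just stands for
         * the reservation pair, it is not a helper this code calls):
         *
         *	do {
         *		old = *pte_flags;			// lwarx
         *		if (access & ~old)
         *			goto out;			// permission denied
         *		new = old | _PAGE_ACCESSED | _PAGE_HASHPTE
         *			  | ((access & _PAGE_RW) ? _PAGE_DIRTY : 0);
         *	} while (cmpxchg(pte_flags, old, new) != old);	// stwcx. lost: retry
         */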
#if (PTE_FLAGS_OFFSET != 0)
        addi    r8,r8,PTE_FLAGS_OFFSET
#endif
retry:
        lwarx   r6,0,r8                 /* get linux-style pte, flag word */
        andc.   r5,r3,r6                /* check access & ~permission */
#ifdef CONFIG_SMP
        bne-    hash_page_out           /* return if access not permitted */
#else
        bnelr-
#endif
        or      r5,r0,r6                /* set accessed/dirty bits */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
        subf    r10,r6,r8               /* create false data dependency */
        subi    r10,r10,PTE_FLAGS_OFFSET
        lwzx    r10,r6,r10              /* Get upper PTE word */
#else
        lwz     r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
        stwcx.  r5,0,r8                 /* attempt to update PTE */
        bne-    retry                   /* retry if someone got there first */

        mfsrin  r3,r4                   /* get segment reg for segment */
        mfctr   r0
        stw     r0,_CTR(r11)
        bl      create_hpte             /* add the hash table entry */

#ifdef CONFIG_SMP
        eieio
        addis   r8,r7,mmu_hash_lock@ha
        li      r0,0
        stw     r0,mmu_hash_lock@l(r8)
#endif

        /* Return from the exception */
        lwz     r5,_CTR(r11)
        mtctr   r5
        lwz     r0,GPR0(r11)
        lwz     r7,GPR7(r11)
        lwz     r8,GPR8(r11)
        b       fast_exception_return

#ifdef CONFIG_SMP
hash_page_out:
        eieio
        addis   r8,r7,mmu_hash_lock@ha
        li      r0,0
        stw     r0,mmu_hash_lock@l(r8)
        blr
#endif /* CONFIG_SMP */

/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
        mflr    r0
        stw     r0,4(r1)

        /* Convert context and va to VSID */
        mulli   r3,r3,897*16            /* multiply context by context skew */
        rlwinm  r0,r4,4,28,31           /* get ESID (top 4 bits of va) */
        mulli   r0,r0,0x111             /* multiply by ESID skew */
        add     r3,r3,r0                /* note create_hpte trims to 24 bits */
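        /*
         * Equivalently, as a C expression (illustrative sketch; only the
         * low 24 bits end up in the hash, see the rlwinm in create_hpte):
         *
         *	esid = va >> 28;			// top 4 bits of the address
         *	vsid = (context * (897 * 16) + esid * 0x111) & 0xffffff;
         */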

#ifdef CONFIG_SMP
        CURRENT_THREAD_INFO(r8, r1)     /* use cpu number to make tag */
        lwz     r8,TI_CPU(r8)           /* to go in mmu_hash_lock */
        oris    r8,r8,12
#endif /* CONFIG_SMP */

        /*
         * We disable interrupts here, even on UP, because we don't
         * want to race with hash_page, and because we want the
         * _PAGE_HASHPTE bit to be a reliable indication of whether
         * the HPTE exists (or at least whether one did once).
         * We also turn off the MMU for data accesses so that we
         * can't take a hash table miss (assuming the code is
         * covered by a BAT).  -- paulus
         */
        mfmsr   r9
        SYNC
        rlwinm  r0,r9,0,17,15           /* clear bit 16 (MSR_EE) */
        rlwinm  r0,r0,0,28,26           /* clear MSR_DR */
        mtmsr   r0
        SYNC_601
        isync

        tophys(r7,0)

#ifdef CONFIG_SMP
        addis   r6,r7,mmu_hash_lock@ha
        addi    r6,r6,mmu_hash_lock@l
10:     lwarx   r0,0,r6                 /* take the mmu_hash_lock */
        cmpi    0,r0,0
        bne-    11f
        stwcx.  r8,0,r6
        beq+    12f
11:     lwz     r0,0(r6)
        cmpi    0,r0,0
        beq     10b
        b       11b
12:     isync
#endif

        /*
         * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
         * If _PAGE_HASHPTE was already set, we don't replace the existing
         * HPTE, so we just unlock and return.
         */
        mr      r8,r5
#ifndef CONFIG_PTE_64BIT
        rlwimi  r8,r4,22,20,29
#else
        rlwimi  r8,r4,23,20,28
        addi    r8,r8,PTE_FLAGS_OFFSET
#endif
1:      lwarx   r6,0,r8
        andi.   r0,r6,_PAGE_HASHPTE
        bne     9f                      /* if HASHPTE already set, done */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
        subf    r10,r6,r8               /* create false data dependency */
        subi    r10,r10,PTE_FLAGS_OFFSET
        lwzx    r10,r6,r10              /* Get upper PTE word */
#else
        lwz     r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
        ori     r5,r6,_PAGE_HASHPTE
        stwcx.  r5,0,r8
        bne-    1b

        bl      create_hpte

9:
#ifdef CONFIG_SMP
        addis   r6,r7,mmu_hash_lock@ha
        addi    r6,r6,mmu_hash_lock@l
        eieio
        li      r0,0
        stw     r0,0(r6)                /* clear mmu_hash_lock */
#endif

        /* reenable interrupts and DR */
        mtmsr   r9
        SYNC_601
        isync

        lwz     r0,4(r1)
        mtlr    r0
        blr

/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
 * offset to be added to addresses (0 if the MMU is on,
 * -KERNELBASE if it is off).  r10 contains the upper half of
 * the PTE if CONFIG_PTE_64BIT.
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 *  -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 */
Hash_base = 0xc0180000
Hash_bits = 12                          /* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)

/* defines for the PTE format for 32-bit PPCs */
#define HPTE_SIZE       8
#define PTEG_SIZE       64
#define LG_PTEG_SIZE    6
#define LDPTEu          lwzu
#define LDPTE           lwz
#define STPTE           stw
#define CMPPTE          cmpw
#define PTE_H           0x40
#define PTE_V           0x80000000
#define TST_V(r)        rlwinm. r,r,0,0,0
#define SET_V(r)        oris r,r,PTE_V@h
#define CLR_V(r,t)      rlwinm r,r,0,1,31

#define HASH_LEFT       31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT      31-LG_PTEG_SIZE

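/*
 * In C terms, the patched instruction sequences below compute roughly
 * the following (a sketch; Hash_base and Hash_bits above are only
 * example values, and the real instructions are patched at boot):
 *
 *	hash      = (vsid ^ (va >> 12)) & ((1 << Hash_bits) - 1);
 *	primary   = Hash_base + hash * PTEG_SIZE;
 *	secondary = Hash_base + (~hash & ((1 << Hash_bits) - 1)) * PTEG_SIZE;
 *
 * Each PTEG holds 8 HPTEs of HPTE_SIZE bytes; the first word of an HPTE
 * is V | VSID | H | API, the second word is RPN | WIMG | PP.
 */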
_GLOBAL(create_hpte)
        /* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
        rlwinm  r8,r5,32-10,31,31       /* _PAGE_RW -> PP lsb */
        rlwinm  r0,r5,32-7,31,31        /* _PAGE_DIRTY -> PP lsb */
        and     r8,r8,r0                /* writable if _RW & _DIRTY */
        rlwimi  r5,r5,32-1,30,30        /* _PAGE_USER -> PP msb */
        rlwimi  r5,r5,32-2,31,31        /* _PAGE_USER -> PP lsb */
        ori     r8,r8,0xe04             /* clear out reserved bits */
        andc    r8,r5,r8                /* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
        rlwinm  r8,r8,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
#ifdef CONFIG_PTE_64BIT
        /* Put the XPN bits into the PTE */
        rlwimi  r8,r10,8,20,22
        rlwimi  r8,r10,2,29,29
#endif
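        /*
         * The resulting PP (page protection) bits, written out as a C
         * sketch of the "PP = user? (rw&dirty? 2: 3): 0" comment above:
         *
         *	if (!(pte & _PAGE_USER))
         *		pp = 0;		// supervisor read/write, no user access
         *	else if ((pte & _PAGE_RW) && (pte & _PAGE_DIRTY))
         *		pp = 2;		// read/write for user and supervisor
         *	else
         *		pp = 3;		// read-only for user and supervisor
         */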

        /* Construct the high word of the PPC-style PTE (r5) */
        rlwinm  r5,r3,7,1,24            /* put VSID in 0x7fffff80 bits */
        rlwimi  r5,r4,10,26,31          /* put in API (abbrev page index) */
        SET_V(r5)                       /* set V (valid) bit */

        /* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(hash_page_patch_A)
        addis   r0,r7,Hash_base@h       /* base address of hash table */
        rlwimi  r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
        rlwinm  r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
        xor     r3,r3,r0                /* make primary hash */
        li      r0,8                    /* PTEs/group */

        /*
         * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
         * if it is clear, meaning that the HPTE isn't there already...
         */
        andi.   r6,r6,_PAGE_HASHPTE
        beq+    10f                     /* no PTE: go look for an empty slot */
        tlbie   r4

        addis   r4,r7,htab_hash_searches@ha
        lwz     r6,htab_hash_searches@l(r4)
        addi    r6,r6,1                 /* count how many searches we do */
        stw     r6,htab_hash_searches@l(r4)

        /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
        mtctr   r0
        addi    r4,r3,-HPTE_SIZE
1:      LDPTEu  r6,HPTE_SIZE(r4)        /* get next PTE */
        CMPPTE  0,r6,r5
        bdnzf   2,1b                    /* loop while ctr != 0 && !cr0.eq */
        beq+    found_slot

        /* Search the secondary PTEG for a matching PTE */
        ori     r5,r5,PTE_H             /* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_B)
        xoris   r4,r3,Hash_msk>>16      /* compute secondary hash */
        xori    r4,r4,(-PTEG_SIZE & 0xffff)
        addi    r4,r4,-HPTE_SIZE
        mtctr   r0
2:      LDPTEu  r6,HPTE_SIZE(r4)
        CMPPTE  0,r6,r5
        bdnzf   2,2b
        beq+    found_slot
        xori    r5,r5,PTE_H             /* clear H bit again */

        /* Search the primary PTEG for an empty slot */
10:     mtctr   r0
        addi    r4,r3,-HPTE_SIZE        /* search primary PTEG */
1:      LDPTEu  r6,HPTE_SIZE(r4)        /* get next PTE */
        TST_V(r6)                       /* test valid bit */
        bdnzf   2,1b                    /* loop while ctr != 0 && !cr0.eq */
        beq+    found_empty

        /* update counter of times that the primary PTEG is full */
        addis   r4,r7,primary_pteg_full@ha
        lwz     r6,primary_pteg_full@l(r4)
        addi    r6,r6,1
        stw     r6,primary_pteg_full@l(r4)

        /* Search the secondary PTEG for an empty slot */
        ori     r5,r5,PTE_H             /* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_C)
        xoris   r4,r3,Hash_msk>>16      /* compute secondary hash */
        xori    r4,r4,(-PTEG_SIZE & 0xffff)
        addi    r4,r4,-HPTE_SIZE
        mtctr   r0
2:      LDPTEu  r6,HPTE_SIZE(r4)
        TST_V(r6)
        bdnzf   2,2b
        beq+    found_empty
        xori    r5,r5,PTE_H             /* clear H bit again */

        /*
         * Choose an arbitrary slot in the primary PTEG to overwrite.
         * Since both the primary and secondary PTEGs are full, and we
         * have no information that the PTEs in the primary PTEG are
         * more important or useful than those in the secondary PTEG,
         * and we know there is a definite (although small) speed
         * advantage to putting the PTE in the primary PTEG, we always
         * put the PTE in the primary PTEG.
         *
         * In addition, we skip any slot that is mapping kernel text,
         * in order to avoid a deadlock when not using BAT mappings if
         * we try to hash in the kernel hash code itself after it has
         * already taken the hash table lock.  This works in conjunction
         * with pre-faulting of the kernel text.
         *
         * If the hash table bucket is full of kernel text entries, we'll
         * lock up here, but that shouldn't happen.
         */
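        /*
         * In C terms, the eviction below is a round-robin walk over the
         * 8 slots of the primary PTEG, skipping slots whose physical page
         * lies below etext (a sketch; the names are illustrative):
         *
         *	do {
         *		next_slot = (next_slot + HPTE_SIZE) & (7 * HPTE_SIZE);
         *		slot = pteg_base + next_slot;
         *		rpn = slot->second_word & ~0xfff;   // victim's physical page
         *	} while (rpn < pa_etext);   // pa_etext: physical address of etext
         */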

1:      addis   r4,r7,next_slot@ha              /* get next evict slot */
        lwz     r6,next_slot@l(r4)
        addi    r6,r6,HPTE_SIZE                 /* search for candidate */
        andi.   r6,r6,7*HPTE_SIZE
        stw     r6,next_slot@l(r4)
        add     r4,r3,r6
        LDPTE   r0,HPTE_SIZE/2(r4)              /* get PTE second word */
        clrrwi  r0,r0,12
        lis     r6,etext@h
        ori     r6,r6,etext@l                   /* get etext */
        tophys(r6,r6)
        cmpl    cr0,r0,r6                       /* compare and try again */
        blt     1b

#ifndef CONFIG_SMP
        /* Store PTE in PTEG */
found_empty:
        STPTE   r5,0(r4)
found_slot:
        STPTE   r8,HPTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
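/*
 * The required ordering, as a rough C-style sketch (field names are
 * illustrative; this mirrors the store sequence just below):
 *
 *	hpte->first_word = pte_hi & ~PTE_V;	// make the slot invalid first
 *	sync(); tlbsync();
 *	hpte->second_word = pte_lo;		// new RPN, WIMG, PP bits
 *	sync();
 *	hpte->first_word = pte_hi | PTE_V;	// only now mark it valid
 */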
found_empty:
found_slot:
        CLR_V(r5,r0)            /* clear V (valid) bit in PTE */
        STPTE   r5,0(r4)
        sync
        TLBSYNC
        STPTE   r8,HPTE_SIZE/2(r4) /* put in correct RPN, WIMG, PP bits */
        sync
        SET_V(r5)
        STPTE   r5,0(r4)        /* finally set V bit in PTE */
#endif /* CONFIG_SMP */

        sync            /* make sure pte updates get to memory */
        blr

        .section .bss
        .align  2
next_slot:
        .space  4
primary_pteg_full:
        .space  4
htab_hash_searches:
        .space  4
        .previous

/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *                  int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
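/*
 * Roughly, in C-like pseudocode (a sketch of the loop below; helper
 * names are illustrative, not real functions):
 *
 *	for (i = 0; i < count; i++, va += 0x1000, pte++) {
 *		if (!(pte->flags & _PAGE_HASHPTE))
 *			continue;
 *		atomically_clear_hashpte(pte);		// lwarx/stwcx. below
 *		hpte = find_hpte(context, va);		// primary, then secondary PTEG
 *		if (hpte)
 *			hpte->first_word = 0;		// clear V: invalidate
 *		tlbie(va);
 *	}
 */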
_GLOBAL(flush_hash_pages)
        tophys(r7,0)

        /*
         * We disable interrupts here, even on UP, because we want
         * the _PAGE_HASHPTE bit to be a reliable indication of
         * whether the HPTE exists (or at least whether one did once).
         * We also turn off the MMU for data accesses so that we
         * can't take a hash table miss (assuming the code is
         * covered by a BAT).  -- paulus
         */
        mfmsr   r10
        SYNC
        rlwinm  r0,r10,0,17,15          /* clear bit 16 (MSR_EE) */
        rlwinm  r0,r0,0,28,26           /* clear MSR_DR */
        mtmsr   r0
        SYNC_601
        isync

        /* First find a PTE in the range that has _PAGE_HASHPTE set */
#ifndef CONFIG_PTE_64BIT
        rlwimi  r5,r4,22,20,29
#else
        rlwimi  r5,r4,23,20,28
#endif
1:      lwz     r0,PTE_FLAGS_OFFSET(r5)
        cmpwi   cr1,r6,1
        andi.   r0,r0,_PAGE_HASHPTE
        bne     2f
        ble     cr1,19f
        addi    r4,r4,0x1000
        addi    r5,r5,PTE_SIZE
        addi    r6,r6,-1
        b       1b

        /* Convert context and va to VSID */
2:      mulli   r3,r3,897*16            /* multiply context by context skew */
        rlwinm  r0,r4,4,28,31           /* get ESID (top 4 bits of va) */
        mulli   r0,r0,0x111             /* multiply by ESID skew */
        add     r3,r3,r0                /* note code below trims to 24 bits */

        /* Construct the high word of the PPC-style PTE (r11) */
        rlwinm  r11,r3,7,1,24           /* put VSID in 0x7fffff80 bits */
        rlwimi  r11,r4,10,26,31         /* put in API (abbrev page index) */
        SET_V(r11)                      /* set V (valid) bit */

#ifdef CONFIG_SMP
        addis   r9,r7,mmu_hash_lock@ha
        addi    r9,r9,mmu_hash_lock@l
        CURRENT_THREAD_INFO(r8, r1)
        add     r8,r8,r7
        lwz     r8,TI_CPU(r8)
        oris    r8,r8,9
10:     lwarx   r0,0,r9
        cmpi    0,r0,0
        bne-    11f
        stwcx.  r8,0,r9
        beq+    12f
11:     lwz     r0,0(r9)
        cmpi    0,r0,0
        beq     10b
        b       11b
12:     isync
#endif

        /*
         * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
         * already clear, we're done (for this pte).  If not,
         * clear it (atomically) and proceed.  -- paulus.
         */
#if (PTE_FLAGS_OFFSET != 0)
        addi    r5,r5,PTE_FLAGS_OFFSET
#endif
33:     lwarx   r8,0,r5                 /* fetch the pte flags word */
        andi.   r0,r8,_PAGE_HASHPTE
        beq     8f                      /* done if HASHPTE is already clear */
        rlwinm  r8,r8,0,31,29           /* clear HASHPTE bit */
        stwcx.  r8,0,r5                 /* update the pte */
        bne-    33b
EXPORT_SYMBOL(flush_hash_pages)

        /* Get the address of the primary PTE group in the hash table (r8) */
_GLOBAL(flush_hash_patch_A)
        addis   r8,r7,Hash_base@h       /* base address of hash table */
        rlwimi  r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
        rlwinm  r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
        xor     r8,r0,r8                /* make primary hash */

        /* Search the primary PTEG for a PTE whose 1st (d)word matches r11 */
        li      r0,8                    /* PTEs/group */
        mtctr   r0
        addi    r12,r8,-HPTE_SIZE
1:      LDPTEu  r0,HPTE_SIZE(r12)       /* get next PTE */
        CMPPTE  0,r0,r11
        bdnzf   2,1b                    /* loop while ctr != 0 && !cr0.eq */
        beq+    3f

        /* Search the secondary PTEG for a matching PTE */
        ori     r11,r11,PTE_H           /* set H (secondary hash) bit */
        li      r0,8                    /* PTEs/group */
_GLOBAL(flush_hash_patch_B)
        xoris   r12,r8,Hash_msk>>16     /* compute secondary hash */
        xori    r12,r12,(-PTEG_SIZE & 0xffff)
        addi    r12,r12,-HPTE_SIZE
        mtctr   r0
2:      LDPTEu  r0,HPTE_SIZE(r12)
        CMPPTE  0,r0,r11
        bdnzf   2,2b
        xori    r11,r11,PTE_H           /* clear H again */
        bne-    4f                      /* should rarely fail to find it */

3:      li      r0,0
        STPTE   r0,0(r12)               /* invalidate entry */
4:      sync
        tlbie   r4                      /* in hw tlb too */
        sync

8:      ble     cr1,9f                  /* if all ptes checked */
81:     addi    r6,r6,-1
        addi    r5,r5,PTE_SIZE
        addi    r4,r4,0x1000
        lwz     r0,0(r5)                /* check next pte */
        cmpwi   cr1,r6,1
        andi.   r0,r0,_PAGE_HASHPTE
        bne     33b
        bgt     cr1,81b

9:
#ifdef CONFIG_SMP
        TLBSYNC
        li      r0,0
        stw     r0,0(r9)                /* clear mmu_hash_lock */
#endif

19:     mtmsr   r10
        SYNC_601
        isync
        blr

/*
 * Flush an entry from the TLB
 */
_GLOBAL(_tlbie)
#ifdef CONFIG_SMP
        CURRENT_THREAD_INFO(r8, r1)
        lwz     r8,TI_CPU(r8)
        oris    r8,r8,11
        mfmsr   r10
        SYNC
        rlwinm  r0,r10,0,17,15          /* clear bit 16 (MSR_EE) */
        rlwinm  r0,r0,0,28,26           /* clear DR */
        mtmsr   r0
        SYNC_601
        isync
        lis     r9,mmu_hash_lock@h
        ori     r9,r9,mmu_hash_lock@l
        tophys(r9,r9)
10:     lwarx   r7,0,r9
        cmpwi   0,r7,0
        bne-    10b
        stwcx.  r8,0,r9
        bne-    10b
        eieio
        tlbie   r3
        sync
        TLBSYNC
        li      r0,0
        stw     r0,0(r9)                /* clear mmu_hash_lock */
        mtmsr   r10
        SYNC_601
        isync
#else /* CONFIG_SMP */
        tlbie   r3
        sync
#endif /* CONFIG_SMP */
        blr

/*
 * Flush the entire TLB. 603/603e only
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_SMP)
        CURRENT_THREAD_INFO(r8, r1)
        lwz     r8,TI_CPU(r8)
        oris    r8,r8,10
        mfmsr   r10
        SYNC
        rlwinm  r0,r10,0,17,15          /* clear bit 16 (MSR_EE) */
        rlwinm  r0,r0,0,28,26           /* clear DR */
        mtmsr   r0
        SYNC_601
        isync
        lis     r9,mmu_hash_lock@h
        ori     r9,r9,mmu_hash_lock@l
        tophys(r9,r9)
10:     lwarx   r7,0,r9
        cmpwi   0,r7,0
        bne-    10b
        stwcx.  r8,0,r9
        bne-    10b
        sync
        tlbia
        sync
        TLBSYNC
        li      r0,0
        stw     r0,0(r9)                /* clear mmu_hash_lock */
        mtmsr   r10
        SYNC_601
        isync
#else /* CONFIG_SMP */
        sync
        tlbia
        sync
#endif /* CONFIG_SMP */
        blr