linux/arch/powerpc/mm/book3s64/hash_native.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/pgtable.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/trace.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/feature-fixups.h>

#include <misc/cxl-base.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

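/*
 * The HPTE lock bit is a software bit in the HPTE's first doubleword,
 * which is stored big-endian. The generic bitops used below index bits
 * within a native-endian long, so on little-endian hosts the same bit
 * sits at 56 + 3 = 59 after the byte swap.
 */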
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif

static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

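/*
 * tlbiel instruction for hash, set invalidation, ISA 2.06 style:
 * only the set number and the IS field are encoded, in RB.
 */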
static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
{
        unsigned long rb;

        rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));

        asm volatile("tlbiel %0" : : "r" (rb));
}

/*
 * tlbiel instruction for hash, set invalidation
 * i.e., r=0 and is=01 or is=10 or is=11
 */
static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
                                        unsigned int pid,
                                        unsigned int ric, unsigned int prs)
{
        unsigned long rb;
        unsigned long rs;
        unsigned int r = 0; /* hash format */

        rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
        rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

        asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
                     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "i"(r)
                     : "memory");
}


static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is)
{
        unsigned int set;

        asm volatile("ptesync": : :"memory");

        for (set = 0; set < num_sets; set++)
                tlbiel_hash_set_isa206(set, is);

        ppc_after_tlbiel_barrier();
}

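/*
 * ISA 3.0 tlbiel takes explicit RIC/PRS/R arguments: ric=2 invalidates
 * the TLB and all caching structures, ric=0 invalidates TLB entries
 * only.
 */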
static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
{
        unsigned int set;

        asm volatile("ptesync": : :"memory");

        /*
         * Flush the partition table cache if this is HV mode.
         */
        if (early_cpu_has_feature(CPU_FTR_HVMODE))
                tlbiel_hash_set_isa300(0, is, 0, 2, 0);

        /*
         * Now invalidate the process table cache. UPRT=0 HPT modes (what
         * current hardware implements) do not use the process table, but
         * add the flushes anyway.
         *
         * From ISA v3.0B p. 1078:
         *     The following forms are invalid.
         *      * PRS=1, R=0, and RIC!=2 (The only process-scoped
         *        HPT caching is of the Process Table.)
         */
        tlbiel_hash_set_isa300(0, is, 0, 2, 1);

        /*
         * Then flush the sets of the TLB proper. Hash mode uses
         * partition scoped TLB translations, which may be flushed
         * in !HV mode.
         */
        for (set = 0; set < num_sets; set++)
                tlbiel_hash_set_isa300(set, is, 0, 0, 0);

        ppc_after_tlbiel_barrier();

        asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
}

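/*
 * IS=2 invalidates all entries matching the current LPID, IS=3
 * invalidates all entries, matching the TLB_INVAL_SCOPE_* values
 * handled below.
 */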
void hash__tlbiel_all(unsigned int action)
{
        unsigned int is;

        switch (action) {
        case TLB_INVAL_SCOPE_GLOBAL:
                is = 3;
                break;
        case TLB_INVAL_SCOPE_LPID:
                is = 2;
                break;
        default:
                BUG();
        }

        if (early_cpu_has_feature(CPU_FTR_ARCH_300))
                tlbiel_all_isa300(POWER9_TLB_SETS_HASH, is);
        else if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
                tlbiel_all_isa206(POWER8_TLB_SETS, is);
        else if (early_cpu_has_feature(CPU_FTR_ARCH_206))
                tlbiel_all_isa206(POWER7_TLB_SETS, is);
        else
                WARN(1, "%s called on pre-POWER7 CPU\n", __func__);
}

static inline unsigned long ___tlbie(unsigned long vpn, int psize,
                                     int apsize, int ssize)
{
        unsigned long va;
        unsigned int penc;
        unsigned long sllp;

        /*
         * We need bits 14 to 65 of the va for a tlbie of a 4K page.
         * With vpn we already ignore the lower VPN_SHIFT bits.
         * The top two bits are also ignored because we can only
         * accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT of 12.
         */
        va = vpn << VPN_SHIFT;
        /*
         * Clear the top 16 bits of the 64 bit va, non SLS segment.
         * Older versions of the architecture (2.02 and earlier) require
         * the masking of the top 16 bits.
         */
        if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
                va &= ~(0xffffULL << 48);

        switch (psize) {
        case MMU_PAGE_4K:
                /* clear out bits after (52) [0....52.....63] */
                va &= ~((1ul << (64 - 52)) - 1);
                va |= ssize << 8;
                sllp = get_sllp_encoding(apsize);
                va |= sllp << 5;
                asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        default:
                /* We need 14 to 14 + i bits of va */
                penc = mmu_psize_defs[psize].penc[apsize];
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                va |= penc << 12;
                va |= ssize << 8;
                /*
                 * AVAL bits:
                 * We don't need all the bits, but the rest of the bits
                 * must be ignored by the processor.
                 * vpn covers up to 65 bits of va (0...65) and we need
                 * bits 58..64 of va.
                 */
                va |= (vpn & 0xfe); /* AVAL */
                va |= 1; /* L */
                asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        }
        return va;
}

static inline void fixup_tlbie_vpn(unsigned long vpn, int psize,
                                   int apsize, int ssize)
{
        if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
                /* Radix flush for a hash guest */

                unsigned long rb, rs, prs, r, ric;

                rb = PPC_BIT(52); /* IS = 2 */
                rs = 0;  /* lpid = 0 */
                prs = 0; /* partition scoped */
                r = 1;   /* radix format */
                ric = 0; /* RIC_FLUSH_TLB */

                /*
                 * Need the extra ptesync to make sure we don't
                 * re-order the tlbie
                 */
                asm volatile("ptesync": : :"memory");
                asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                             : : "r"(rb), "i"(r), "i"(prs),
                               "i"(ric), "r"(rs) : "memory");
        }

        if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
                /* Need the extra ptesync to ensure we don't reorder tlbie */
                asm volatile("ptesync": : :"memory");
                ___tlbie(vpn, psize, apsize, ssize);
        }
}

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
        unsigned long rb;

        rb = ___tlbie(vpn, psize, apsize, ssize);
        trace_tlbie(0, 0, rb, 0, 0, 0, 0);
}

static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
        unsigned long va;
        unsigned int penc;
        unsigned long sllp;

        /* VPN_SHIFT can be at most 12 */
        va = vpn << VPN_SHIFT;
        /*
         * Clear the top 16 bits of the 64 bit va, non SLS segment.
         * Older versions of the architecture (2.02 and earlier) require
         * the masking of the top 16 bits.
         */
        if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
                va &= ~(0xffffULL << 48);

        switch (psize) {
        case MMU_PAGE_4K:
                /* clear out bits after (52) [0....52.....63] */
                va &= ~((1ul << (64 - 52)) - 1);
                va |= ssize << 8;
                sllp = get_sllp_encoding(apsize);
                va |= sllp << 5;
                asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1)
                             : : "r" (va), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        default:
                /* We need 14 to 14 + i bits of va */
                penc = mmu_psize_defs[psize].penc[apsize];
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                va |= penc << 12;
                va |= ssize << 8;
                /*
                 * AVAL bits:
                 * We don't need all the bits, but the rest of the bits
                 * must be ignored by the processor.
                 * vpn covers up to 65 bits of va (0...65) and we need
                 * bits 58..64 of va.
                 */
                va |= (vpn & 0xfe);
                va |= 1; /* L */
                asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1)
                             : : "r" (va), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        }
        trace_tlbie(0, 1, va, 0, 0, 0, 0);
}

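/*
 * Invalidate one translation, using tlbiel (no broadcast) when the
 * caller knows the mapping was only used on this CPU, and a broadcast
 * tlbie otherwise, serialized by native_tlbie_lock on hardware without
 * MMU_FTR_LOCKLESS_TLBIE.
 */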
static inline void tlbie(unsigned long vpn, int psize, int apsize,
                         int ssize, int local)
{
        unsigned int use_local;
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

        use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

        if (use_local)
                use_local = mmu_psize_defs[psize].tlbiel;
        if (lock_tlbie && !use_local)
                raw_spin_lock(&native_tlbie_lock);
        asm volatile("ptesync": : :"memory");
        if (use_local) {
                __tlbiel(vpn, psize, apsize, ssize);
                ppc_after_tlbiel_barrier();
        } else {
                __tlbie(vpn, psize, apsize, ssize);
                fixup_tlbie_vpn(vpn, psize, apsize, ssize);
                asm volatile("eieio; tlbsync; ptesync": : :"memory");
        }
        if (lock_tlbie && !use_local)
                raw_spin_unlock(&native_tlbie_lock);
}

static inline void native_lock_hpte(struct hash_pte *hptep)
{
        unsigned long *word = (unsigned long *)&hptep->v;

        while (1) {
                if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
                        break;
                spin_begin();
                while (test_bit(HPTE_LOCK_BIT, word))
                        spin_cpu_relax();
                spin_end();
        }
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
        unsigned long *word = (unsigned long *)&hptep->v;

        clear_bit_unlock(HPTE_LOCK_BIT, word);
}

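/*
 * Scan the group for an invalid slot without taking the per-HPTE lock
 * first; the lock is only taken to re-check, since another CPU may set
 * HPTE_V_VALID concurrently.
 */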
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
                        unsigned long pa, unsigned long rflags,
                        unsigned long vflags, int psize, int apsize, int ssize)
{
        struct hash_pte *hptep = htab_address + hpte_group;
        unsigned long hpte_v, hpte_r;
        int i;

        if (!(vflags & HPTE_V_BOLTED)) {
                DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
                        " rflags=%lx, vflags=%lx, psize=%d)\n",
                        hpte_group, vpn, pa, rflags, vflags, psize);
        }

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
                        if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
                                break;
                        native_unlock_hpte(hptep);
                }

                hptep++;
        }

        if (i == HPTES_PER_GROUP)
                return -1;

        hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
        hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

        if (!(vflags & HPTE_V_BOLTED)) {
                DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
                        i, hpte_v, hpte_r);
        }

        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
                hpte_v = hpte_old_to_new_v(hpte_v);
        }

        hptep->r = cpu_to_be64(hpte_r);
        /* Guarantee the second dword is visible before the valid bit */
        eieio();
        /*
         * Now set the first dword including the valid bit
         * NOTE: this also unlocks the hpte
         */
        hptep->v = cpu_to_be64(hpte_v);

        __asm__ __volatile__ ("ptesync" : : : "memory");

        return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

static long native_hpte_remove(unsigned long hpte_group)
{
        struct hash_pte *hptep;
        int i;
        int slot_offset;
        unsigned long hpte_v;

        DBG_LOW("    remove(group=%lx)\n", hpte_group);

        /* pick a random entry to start at */
        slot_offset = mftb() & 0x7;

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + hpte_group + slot_offset;
                hpte_v = be64_to_cpu(hptep->v);

                if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
                        hpte_v = be64_to_cpu(hptep->v);
                        if ((hpte_v & HPTE_V_VALID)
                            && !(hpte_v & HPTE_V_BOLTED))
                                break;
                        native_unlock_hpte(hptep);
                }

                slot_offset++;
                slot_offset &= 0x7;
        }

        if (i == HPTES_PER_GROUP)
                return -1;

        /* Invalidate the hpte. NOTE: this also unlocks it */
        hptep->v = 0;

        return i;
}

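/*
 * Update the pp bits of an HPTE in place. Returns -1 if the entry no
 * longer matches, e.g. because it was evicted in the meantime, so the
 * caller can fall back to inserting a fresh entry.
 */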
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
                                 unsigned long vpn, int bpsize,
                                 int apsize, int ssize, unsigned long flags)
{
        struct hash_pte *hptep = htab_address + slot;
        unsigned long hpte_v, want_v;
        int ret = 0, local = 0;

        want_v = hpte_encode_avpn(vpn, bpsize, ssize);

        DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
                vpn, want_v & HPTE_V_AVPN, slot, newpp);

        hpte_v = hpte_get_old_v(hptep);
        /*
         * We need to invalidate the TLB always because hpte_remove doesn't do
         * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
         * less random entry from it. When we do that we don't invalidate the
         * TLB (hpte_remove) because we assume the old translation is still
         * technically "valid".
         */
        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
                DBG_LOW(" -> miss\n");
                ret = -1;
        } else {
                native_lock_hpte(hptep);
                /* recheck with locks held */
                hpte_v = hpte_get_old_v(hptep);
                if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
                             !(hpte_v & HPTE_V_VALID))) {
                        ret = -1;
                } else {
                        DBG_LOW(" -> hit\n");
                        /* Update the HPTE */
                        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
                                                ~(HPTE_R_PPP | HPTE_R_N)) |
                                               (newpp & (HPTE_R_PPP | HPTE_R_N |
                                                         HPTE_R_C)));
                }
                native_unlock_hpte(hptep);
        }

        if (flags & HPTE_LOCAL_UPDATE)
                local = 1;
        /*
         * Ensure it is out of the tlb too if it is not a nohpte fault
         */
        if (!(flags & HPTE_NOHPTE_UPDATE))
                tlbie(vpn, bpsize, apsize, ssize, local);

        return ret;
}

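/*
 * Scan one hash group for a valid, matching HPTE. Returns the global
 * slot number, or -1 if not found.
 */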
static long __native_hpte_find(unsigned long want_v, unsigned long slot)
{
        struct hash_pte *hptep;
        unsigned long hpte_v;
        unsigned long i;

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + slot;
                hpte_v = hpte_get_old_v(hptep);
                if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
                        /* HPTE matches */
                        return slot;
                ++slot;
        }

        return -1;
}

static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
        unsigned long hpte_group;
        unsigned long want_v;
        unsigned long hash;
        long slot;

        hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
        want_v = hpte_encode_avpn(vpn, psize, ssize);

        /*
         * We try to keep bolted entries always in the primary hash, but
         * in some cases we can find them in the secondary too.
         */
        hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        slot = __native_hpte_find(want_v, hpte_group);
        if (slot < 0) {
                /* Try in secondary */
                hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot = __native_hpte_find(want_v, hpte_group);
                if (slot < 0)
                        return -1;
        }

        return slot;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
                                       int psize, int ssize)
{
        unsigned long vpn;
        unsigned long vsid;
        long slot;
        struct hash_pte *hptep;

        vsid = get_kernel_vsid(ea, ssize);
        vpn = hpt_vpn(ea, vsid, ssize);

        slot = native_hpte_find(vpn, psize, ssize);
        if (slot == -1)
                panic("could not find page to bolt\n");
        hptep = htab_address + slot;

        /* Update the HPTE */
        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
                                ~(HPTE_R_PPP | HPTE_R_N)) |
                               (newpp & (HPTE_R_PPP | HPTE_R_N)));
        /*
         * Ensure it is out of the TLB too. For bolted entries, base and
         * actual page size will be the same.
         */
        tlbie(vpn, psize, psize, ssize, 0);
}

/*
 * Remove a bolted kernel entry. Memory hotplug uses this.
 *
 * No need to lock here because we should be the only user.
 */
static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
{
        unsigned long vpn;
        unsigned long vsid;
        long slot;
        struct hash_pte *hptep;

        vsid = get_kernel_vsid(ea, ssize);
        vpn = hpt_vpn(ea, vsid, ssize);

        slot = native_hpte_find(vpn, psize, ssize);
        if (slot == -1)
                return -ENOENT;

        hptep = htab_address + slot;

        VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED));

        /* Invalidate the hpte */
        hptep->v = 0;

        /* Invalidate the TLB */
        tlbie(vpn, psize, psize, ssize, 0);
        return 0;
}

static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
                                   int bpsize, int apsize, int ssize, int local)
{
        struct hash_pte *hptep = htab_address + slot;
        unsigned long hpte_v;
        unsigned long want_v;
        unsigned long flags;

        local_irq_save(flags);

        DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

        want_v = hpte_encode_avpn(vpn, bpsize, ssize);
        hpte_v = hpte_get_old_v(hptep);

        if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
                native_lock_hpte(hptep);
                /* recheck with locks held */
                hpte_v = hpte_get_old_v(hptep);

                if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
                        /* Invalidate the hpte. NOTE: this also unlocks it */
                        hptep->v = 0;
                else
                        native_unlock_hpte(hptep);
        }
        /*
         * We need to invalidate the TLB always because hpte_remove doesn't do
         * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
         * less random entry from it. When we do that we don't invalidate the
         * TLB (hpte_remove) because we assume the old translation is still
         * technically "valid".
         */
        tlbie(vpn, bpsize, apsize, ssize, local);

        local_irq_restore(flags);
}

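/*
 * Invalidate all HPTEs backing one hugepage. hpte_slot_array records,
 * for each subpage, whether an HPTE was inserted and which slot within
 * the primary/secondary group it went to.
 */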
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void native_hugepage_invalidate(unsigned long vsid,
                                       unsigned long addr,
                                       unsigned char *hpte_slot_array,
                                       int psize, int ssize, int local)
{
        int i;
        struct hash_pte *hptep;
        int actual_psize = MMU_PAGE_16M;
        unsigned int max_hpte_count, valid;
        unsigned long flags, s_addr = addr;
        unsigned long hpte_v, want_v, shift;
        unsigned long hidx, vpn = 0, hash, slot;

        shift = mmu_psize_defs[psize].shift;
        max_hpte_count = 1U << (PMD_SHIFT - shift);

        local_irq_save(flags);
        for (i = 0; i < max_hpte_count; i++) {
                valid = hpte_valid(hpte_slot_array, i);
                if (!valid)
                        continue;
                hidx = hpte_hash_index(hpte_slot_array, i);

                /* get the vpn */
                addr = s_addr + (i * (1ul << shift));
                vpn = hpt_vpn(addr, vsid, ssize);
                hash = hpt_hash(vpn, shift, ssize);
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;

                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;

                hptep = htab_address + slot;
                want_v = hpte_encode_avpn(vpn, psize, ssize);
                hpte_v = hpte_get_old_v(hptep);

                /* Even if we miss, we need to invalidate the TLB */
                if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
                        /* recheck with locks held */
                        native_lock_hpte(hptep);
                        hpte_v = hpte_get_old_v(hptep);

                        if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
                                /*
                                 * Invalidate the hpte. NOTE: this also unlocks it
                                 */
                                hptep->v = 0;
                        } else
                                native_unlock_hpte(hptep);
                }
                /*
                 * We need to do a tlb invalidate for each address; the
                 * tlbie instruction compares entry_VA in the tlb with
                 * the VA specified here.
                 */
                tlbie(vpn, psize, actual_psize, ssize, local);
        }
        local_irq_restore(flags);
}
#else
static void native_hugepage_invalidate(unsigned long vsid,
                                       unsigned long addr,
                                       unsigned char *hpte_slot_array,
                                       int psize, int ssize, int local)
{
        WARN(1, "%s called without THP support\n", __func__);
}
#endif

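/*
 * Reconstruct vpn, page sizes and segment size from a raw HPTE. Used by
 * native_hpte_clear() below, which has to walk the hash table with no
 * per-page state available.
 */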
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
                        int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
        unsigned long avpn, pteg, vpi;
        unsigned long hpte_v = be64_to_cpu(hpte->v);
        unsigned long hpte_r = be64_to_cpu(hpte->r);
        unsigned long vsid, seg_off;
        int size, a_size, shift;
        /* Look at the 8 bit LP value */
        unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
                hpte_r = hpte_new_to_old_r(hpte_r);
        }
        if (!(hpte_v & HPTE_V_LARGE)) {
                size   = MMU_PAGE_4K;
                a_size = MMU_PAGE_4K;
        } else {
                size = hpte_page_sizes[lp] & 0xf;
                a_size = hpte_page_sizes[lp] >> 4;
        }
        /* This works for all page sizes, and for 256M and 1T segments */
        *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
        shift = mmu_psize_defs[size].shift;

        avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
        pteg = slot / HPTES_PER_GROUP;
        if (hpte_v & HPTE_V_SECONDARY)
                pteg = ~pteg;

        switch (*ssize) {
        case MMU_SEGSIZE_256M:
                /* We only have 28 - 23 bits of seg_off in avpn */
                seg_off = (avpn & 0x1f) << 23;
                vsid    = avpn >> 5;
                /* We can find more bits from the pteg value */
                if (shift < 23) {
                        vpi = (vsid ^ pteg) & htab_hash_mask;
                        seg_off |= vpi << shift;
                }
                *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
                break;
        case MMU_SEGSIZE_1T:
                /* We only have 40 - 23 bits of seg_off in avpn */
                seg_off = (avpn & 0x1ffff) << 23;
                vsid    = avpn >> 17;
                if (shift < 23) {
                        vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
                        seg_off |= vpi << shift;
                }
                *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
                break;
        default:
                *vpn = size = 0;
        }
        *psize  = size;
        *apsize = a_size;
}

/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre-POWER5 hardware, not taking the lock could
 * cause deadlock; on POWER5 and newer, not taking it is fine. This only
 * gets called during boot before secondary CPUs have come up and during
 * crashdump, and all bets are off anyway.
 *
 * TODO: add batching support when enabled.  Remember, no dynamic memory
 * here, although there is the control page available...
 */
static void native_hpte_clear(void)
{
        unsigned long vpn = 0;
        unsigned long slot, slots;
        struct hash_pte *hptep = htab_address;
        unsigned long hpte_v;
        unsigned long pteg_count;
        int psize, apsize, ssize;

        pteg_count = htab_hash_mask + 1;

        slots = pteg_count * HPTES_PER_GROUP;

        for (slot = 0; slot < slots; slot++, hptep++) {
                /*
                 * We could lock the pte here, but we are the only cpu
                 * running, right?  And for a crash dump, we probably
                 * don't want to wait for a possibly bad cpu.
                 */
                hpte_v = be64_to_cpu(hptep->v);

                /*
                 * Call ___tlbie() here rather than tlbie() since we can't
                 * take the native_tlbie_lock.
                 */
                if (hpte_v & HPTE_V_VALID) {
                        hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
                        hptep->v = 0;
                        ___tlbie(vpn, psize, apsize, ssize);
                }
        }

        asm volatile("eieio; tlbsync; ptesync":::"memory");
}

/*
 * Batched hash table flush; we batch the tlbie's to avoid taking/releasing
 * the lock all the time.
 */
static void native_flush_hash_range(unsigned long number, int local)
{
        unsigned long vpn = 0;
        unsigned long hash, index, hidx, shift, slot;
        struct hash_pte *hptep;
        unsigned long hpte_v;
        unsigned long want_v;
        unsigned long flags;
        real_pte_t pte;
        struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
        unsigned long psize = batch->psize;
        int ssize = batch->ssize;
        int i;
        unsigned int use_local;

        use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
                mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();

        local_irq_save(flags);

        for (i = 0; i < number; i++) {
                vpn = batch->vpn[i];
                pte = batch->pte[i];

                pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
                        hash = hpt_hash(vpn, shift, ssize);
                        hidx = __rpte_to_hidx(pte, index);
                        if (hidx & _PTEIDX_SECONDARY)
                                hash = ~hash;
                        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                        slot += hidx & _PTEIDX_GROUP_IX;
                        hptep = htab_address + slot;
                        want_v = hpte_encode_avpn(vpn, psize, ssize);
                        hpte_v = hpte_get_old_v(hptep);

                        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
                                continue;
                        /* lock and try again */
                        native_lock_hpte(hptep);
                        hpte_v = hpte_get_old_v(hptep);

                        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
                                native_unlock_hpte(hptep);
                        else
                                hptep->v = 0;

                } pte_iterate_hashed_end();
        }

        if (use_local) {
                asm volatile("ptesync":::"memory");
                for (i = 0; i < number; i++) {
                        vpn = batch->vpn[i];
                        pte = batch->pte[i];

                        pte_iterate_hashed_subpages(pte, psize,
                                                    vpn, index, shift) {
                                __tlbiel(vpn, psize, psize, ssize);
                        } pte_iterate_hashed_end();
                }
                ppc_after_tlbiel_barrier();
        } else {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);

                asm volatile("ptesync":::"memory");
                for (i = 0; i < number; i++) {
                        vpn = batch->vpn[i];
                        pte = batch->pte[i];

                        pte_iterate_hashed_subpages(pte, psize,
                                                    vpn, index, shift) {
                                __tlbie(vpn, psize, psize, ssize);
                        } pte_iterate_hashed_end();
                }
                /*
                 * Just do one more with the last used values.
                 */
                fixup_tlbie_vpn(vpn, psize, psize, ssize);
                asm volatile("eieio; tlbsync; ptesync":::"memory");

                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        }

        local_irq_restore(flags);
}

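/*
 * Wire up the native (bare metal) HPT operations; platforms running
 * under a hypervisor install their own hcall-based versions instead.
 */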
void __init hpte_init_native(void)
{
        mmu_hash_ops.hpte_invalidate    = native_hpte_invalidate;
        mmu_hash_ops.hpte_updatepp      = native_hpte_updatepp;
        mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
        mmu_hash_ops.hpte_removebolted  = native_hpte_removebolted;
        mmu_hash_ops.hpte_insert        = native_hpte_insert;
        mmu_hash_ops.hpte_remove        = native_hpte_remove;
        mmu_hash_ops.hpte_clear_all     = native_hpte_clear;
        mmu_hash_ops.flush_hash_range   = native_flush_hash_range;
        mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;
}