linux/arch/powerpc/include/asm/book3s/64/mmu-hash.h
#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>
#include <asm/bug.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE, which we
 * need for various slices-related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/processor.h>
#include <asm/cpu_has_feature.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED          3
#define SLB_CACHE_ENTRIES       8
#define SLB_MIN_SIZE            32

/* Bits in the SLB ESID word */
#define SLB_ESID_V              ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT          12
#define SLB_VSID_SHIFT_1T       24
#define SLB_VSID_SSIZE_SHIFT    62
#define SLB_VSID_B              ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M         ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T           ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS             ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP             ASM_CONST(0x0000000000000400)
#define SLB_VSID_N              ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L              ASM_CONST(0x0000000000000100)
#define SLB_VSID_C              ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP             ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00          ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01          ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10          ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11          ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP            (SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL         (SLB_VSID_KP)
#define SLB_VSID_USER           (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C                 (0x08000000)
#define SLBIE_SSIZE_SHIFT       25
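
/*
 * Illustrative sketch (example only, not part of this header): how the
 * SLB VSID dword for a bolted kernel 256M segment is typically composed,
 * in the style of the kernel's SLB management code. The function name and
 * 'llp_flags' parameter are hypothetical; 'llp_flags' would be the
 * SLB_VSID_LLP bits for the segment's base page size, and a 1T segment
 * would additionally set the B field via SLB_VSID_SSIZE_SHIFT.
 */
#if 0
static unsigned long example_slb_vsid_data(unsigned long vsid,
                                           unsigned long llp_flags)
{
        return (vsid << SLB_VSID_SHIFT) | SLB_VSID_KERNEL | llp_flags;
}
#endif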

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT      62
#define HPTE_V_AVPN_SHIFT       7
#define HPTE_V_AVPN             ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x)      (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)     (!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED           ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK             ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE            ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY        ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID            ASM_CONST(0x0000000000000001)

/*
 * ISA 3.0 has a different HPTE format.
 */
#define HPTE_R_3_0_SSIZE_SHIFT  58
#define HPTE_R_PP0              ASM_CONST(0x8000000000000000)
#define HPTE_R_TS               ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI           ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT        12
#define HPTE_R_RPN              ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP               ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP              ASM_CONST(0x8000000000000003)
#define HPTE_R_N                ASM_CONST(0x0000000000000004)
#define HPTE_R_G                ASM_CONST(0x0000000000000008)
#define HPTE_R_M                ASM_CONST(0x0000000000000010)
#define HPTE_R_I                ASM_CONST(0x0000000000000020)
#define HPTE_R_W                ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG             ASM_CONST(0x0000000000000078)
#define HPTE_R_C                ASM_CONST(0x0000000000000080)
#define HPTE_R_R                ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO           ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG          ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK        ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX 0       /* Supervisor read/write, User none */
#define PP_RWRX 1       /* Supervisor read/write, User read */
#define PP_RWRW 2       /* Supervisor read/write, User read/write */
#define PP_RXRX 3       /* Supervisor read,       User read */
#define PP_RXXX (HPTE_R_PP0 | 2)        /* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK   0xc00   /* invalidation selector */
#define  TLBIEL_INVAL_PAGE      0x000   /* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID  0x800   /* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET       0xc00   /* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK   0xfff000        /* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT  12

#define POWER7_TLB_SETS         128     /* # sets in POWER7 TLB */
#define POWER8_TLB_SETS         512     /* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH    256     /* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX   128     /* # sets in POWER9 TLB Radix mode */

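/*
 * Illustrative sketch (example only): invalidating every congruence
 * class of the local TLB by set index, in the style of the kernel's
 * per-CPU flush routines. The RB operand carries the set number at
 * TLBIEL_INVAL_SET_SHIFT plus the "all LPIDs" selector bits. The
 * function name is hypothetical.
 */
#if 0
static void example_flush_local_tlb_sets(void)
{
        unsigned long rb;
        int set;

        for (set = 0; set < POWER7_TLB_SETS; set++) {
                rb = TLBIEL_INVAL_SET |
                     ((unsigned long)set << TLBIEL_INVAL_SET_SHIFT);
                asm volatile("tlbiel %0" : : "r" (rb) : "memory");
        }
        asm volatile("ptesync" : : : "memory");
}
#endif
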
#ifndef __ASSEMBLY__

struct mmu_hash_ops {
        void            (*hpte_invalidate)(unsigned long slot,
                                           unsigned long vpn,
                                           int bpsize, int apsize,
                                           int ssize, int local);
        long            (*hpte_updatepp)(unsigned long slot,
                                         unsigned long newpp,
                                         unsigned long vpn,
                                         int bpsize, int apsize,
                                         int ssize, unsigned long flags);
        void            (*hpte_updateboltedpp)(unsigned long newpp,
                                               unsigned long ea,
                                               int psize, int ssize);
        long            (*hpte_insert)(unsigned long hpte_group,
                                       unsigned long vpn,
                                       unsigned long prpn,
                                       unsigned long rflags,
                                       unsigned long vflags,
                                       int psize, int apsize,
                                       int ssize);
        long            (*hpte_remove)(unsigned long hpte_group);
        int             (*hpte_removebolted)(unsigned long ea,
                                             int psize, int ssize);
        void            (*flush_hash_range)(unsigned long number, int local);
        void            (*hugepage_invalidate)(unsigned long vsid,
                                               unsigned long addr,
                                               unsigned char *hpte_slot_array,
                                               int psize, int ssize, int local);
        /*
         * Special for kexec.
         * To be called in real mode with interrupts disabled. No locks are
         * taken, so concurrent access on pre-POWER5 hardware could result
         * in a deadlock.
         * The linear mapping is destroyed as well.
         */
        void            (*hpte_clear_all)(void);
};
extern struct mmu_hash_ops mmu_hash_ops;
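
/*
 * Illustrative call through the ops table (example only): remove a
 * bolted kernel mapping if the active back end provides the hook.
 * The wrapper name is hypothetical.
 */
#if 0
static int example_remove_bolted(unsigned long ea, int psize, int ssize)
{
        if (!mmu_hash_ops.hpte_removebolted)
                return -ENODEV;
        return mmu_hash_ops.hpte_removebolted(ea, psize, ssize);
}
#endif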

struct hash_pte {
        __be64 v;
        __be64 r;
};
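
/*
 * Illustrative sketch (example only): scanning one PTE group for a
 * matching, valid entry, in the style of the native hash back end.
 * 'want_v' would come from hpte_encode_avpn() further below; the
 * function name is hypothetical.
 */
#if 0
static long example_find_slot(struct hash_pte *hpteg, unsigned long want_v)
{
        long i;

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                unsigned long hpte_v = be64_to_cpu(hpteg[i].v);

                if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
                        return i;
        }
        return -1;
}
#endif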

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;


static inline int shift_to_mmu_psize(unsigned int shift)
{
        int psize;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
                if (mmu_psize_defs[psize].shift == shift)
                        return psize;
        return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
        if (mmu_psize_defs[mmu_psize].shift)
                return mmu_psize_defs[mmu_psize].shift;
        BUG();
}
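
/*
 * For example, shift_to_mmu_psize(16) returns MMU_PAGE_64K (2^16 == 64K)
 * and mmu_psize_to_shift(MMU_PAGE_16M) returns 24, provided those sizes
 * are populated in mmu_psize_defs[].
 */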

static inline unsigned long get_sllp_encoding(int psize)
{
        unsigned long sllp;

        sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
                ((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
        return sllp;
}
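
/*
 * For example, with the usual 64K segment encoding L=1, LP=01
 * (SLB_VSID_L | SLB_VSID_LP_01), this packs to (0x100 >> 6) | (0x10 >> 4)
 * == 0x5, the compact L||LP value used when building tlbie/tlbiel
 * operands.
 */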

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M        0
#define MMU_SEGSIZE_1T          1

/*
 * Encoded virtual page number (VPN) shift.
 * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
 * 12 bits. This enables us to address up to a 76-bit VA.
 * For the HPT hash from a VA we can ignore the page size bits of the VA,
 * and for HPTE encoding we ignore up to 23 bits of the VA. So ignoring
 * the lower 12 bits ensures we work in all cases, including 4K page size.
 */
#define VPN_SHIFT       12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT        12
#define LP_BITS         8
#define LP_MASK(i)      ((0xFF >> (i)) << LP_SHIFT)
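
/*
 * For example, LP_MASK(0) == 0xff000 (all eight LP bits, PTE bits 12-19),
 * while LP_MASK(4) == 0xf000 selects only a 4-bit penc in bits 12-15.
 */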

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
        if (ssize == MMU_SEGSIZE_256M)
                return SLB_VSID_SHIFT;
        return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
        if (ssize == MMU_SEGSIZE_256M)
                return SID_SHIFT;
        return SID_SHIFT_1T;
}

/*
 * This array is indexed by the LP field of the HPTE second dword.
 * Since this field may contain some RPN bits, some entries are
 * replicated so that we get the same value irrespective of RPN.
 * The top 4 bits are the page size index (MMU_PAGE_*) for the
 * actual page size, the bottom 4 bits are the base page size.
 */
extern u8 hpte_page_sizes[1 << LP_BITS];

static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
                                             bool is_base_size)
{
        unsigned int i, lp;

        if (!(h & HPTE_V_LARGE))
                return 1ul << 12;

        /* Look at the 8 bit LP value */
        lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
        i = hpte_page_sizes[lp];
        if (!i)
                return 0;
        if (!is_base_size)
                i >>= 4;
        return 1ul << mmu_psize_defs[i & 0xf].shift;
}

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
        return __hpte_page_size(h, l, 0);
}

static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
        return __hpte_page_size(h, l, 1);
}
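
/*
 * Illustrative usage (example only): given the two dwords of a valid
 * HPTE, recover the actual and base page sizes in bytes. Assuming
 * hpte_page_sizes[] has been populated by the hash MMU setup code, a
 * 64K HPTE with a 64K base page size yields 0x10000 for both. The
 * function name is hypothetical.
 */
#if 0
static void example_hpte_sizes(unsigned long v, unsigned long r)
{
        unsigned long actual = hpte_page_size(v, r);
        unsigned long base = hpte_base_page_size(v, r);
}
#endif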

/*
 * The current system page and segment sizes
 */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of an HPTE,
 * for use when we want to match an existing PTE.  The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
                                             int ssize)
{
        unsigned long v;
        /*
         * The AVA field omits the low-order 23 bits of the 78-bit VA.
         * These bits are not needed in the PTE, because the
         * low-order b of these bits are part of the byte offset
         * into the virtual page and, if b < 23, the high-order
         * 23-b of these bits are always used in selecting the
         * PTEGs to be searched.
         */
        v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
        v <<= HPTE_V_AVPN_SHIFT;
        if (!cpu_has_feature(CPU_FTR_ARCH_300))
                v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
        return v;
}

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
                                          int actual_psize, int ssize)
{
        unsigned long v;
        v = hpte_encode_avpn(vpn, base_psize, ssize);
        if (actual_psize != MMU_PAGE_4K)
                v |= HPTE_V_LARGE;
        return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
                                          int actual_psize, int ssize)
{
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                pa |= ((unsigned long) ssize) << HPTE_R_3_0_SSIZE_SHIFT;

        /* A 4K page needs no special encoding */
        if (actual_psize == MMU_PAGE_4K)
                return pa & HPTE_R_RPN;
        else {
                unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
                unsigned int shift = mmu_psize_defs[actual_psize].shift;
                return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
        }
}
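
/*
 * Illustrative sketch (example only), modelled on what a hash back end
 * does with the two encode helpers above: build the dword pair for a
 * new, valid HPTE. 'vflags' carries bits such as HPTE_V_BOLTED or
 * HPTE_V_SECONDARY; 'rflags' carries the WIMG/PP bits. The function
 * name is hypothetical.
 */
#if 0
static void example_build_hpte(struct hash_pte *hptep, unsigned long vpn,
                               unsigned long pa, int base_psize,
                               int actual_psize, int ssize,
                               unsigned long vflags, unsigned long rflags)
{
        unsigned long hpte_v, hpte_r;

        hpte_v = hpte_encode_v(vpn, base_psize, actual_psize, ssize) |
                 vflags | HPTE_V_VALID;
        hpte_r = hpte_encode_r(pa, base_psize, actual_psize, ssize) | rflags;

        hptep->r = cpu_to_be64(hpte_r);
        /* Guarantee the second dword is visible before the valid bit */
        asm volatile("eieio" : : : "memory");
        hptep->v = cpu_to_be64(hpte_v);
        asm volatile("ptesync" : : : "memory");
}
#endif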

/*
 * Build a VPN_SHIFT-bit shifted VA given VSID, EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
                                    unsigned long vsid, int ssize)
{
        unsigned long mask;
        int s_shift = segment_shift(ssize);

        mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
        return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
                                     unsigned int shift, int ssize)
{
        int mask;
        unsigned long hash, vsid;

        /* VPN_SHIFT can be at most 12 */
        if (ssize == MMU_SEGSIZE_256M) {
                mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
                hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
                        ((vpn & mask) >> (shift - VPN_SHIFT));
        } else {
                mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
                vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
                hash = vsid ^ (vsid << 25) ^
                        ((vpn & mask) >> (shift - VPN_SHIFT));
        }
        return hash & 0x7fffffffffUL;
}
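
/*
 * Illustrative sketch (example only) of the canonical lookup flow built
 * from the helpers above: EA -> VPN -> hash -> offset of the primary
 * PTE group. 'context' would come from the mm's context id; the
 * function name is hypothetical.
 */
#if 0
static unsigned long example_primary_group(unsigned long context,
                                           unsigned long ea, int ssize,
                                           unsigned int shift)
{
        unsigned long vsid = get_vsid(context, ea, ssize);
        unsigned long vpn = hpt_vpn(ea, vsid, ssize);
        unsigned long hash = hpt_hash(vpn, shift, ssize);

        return (hash & htab_hash_mask) * HPTES_PER_GROUP;
}
#endif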

#define HPTE_LOCAL_UPDATE       0x1
#define HPTE_NOHPTE_UPDATE      0x2

extern int __hash_page_4K(unsigned long ea, unsigned long access,
                          unsigned long vsid, pte_t *ptep, unsigned long trap,
                          unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
                           unsigned long vsid, pte_t *ptep, unsigned long trap,
                           unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
                        unsigned long access, unsigned long trap,
                        unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
                     unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                     pte_t *ptep, unsigned long trap, unsigned long flags,
                     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
                           unsigned long vsid, pmd_t *pmdp, unsigned long trap,
                           unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
                                  unsigned long vsid, pmd_t *pmdp,
                                  unsigned long trap, unsigned long flags,
                                  int ssize, unsigned int psize)
{
        BUG();
        return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
                               unsigned long vsid, unsigned long trap,
                               int ssize, int psize, int lpsize,
                               unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                             unsigned long pstart, unsigned long prot,
                             int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
                        int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

#ifdef CONFIG_PPC_PSERIES
void hpte_init_pseries(void);
#else
static inline void hpte_init_pseries(void) { }
#endif

extern void hpte_init_native(void);

extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from the mmu context id and the effective segment id of the address.
 *
 * For user processes the max context id is limited to ((1ul << 19) - 5);
 * for kernel space we use the top 4 context ids to map addresses as below.
 * NOTE: each context only supports 64TB now.
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *      VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We also consider VSID 0 special. We use VSID 0 for SLB entries mapping
 * a bad address. This enables us to consolidate bad address handling in
 * hash_page.
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a proto-VSID of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in the VSID scramble. But the vmemmap
 * (which is what uses region 0xf) will never be close to 64TB in size
 * (it's 56 bytes per page of system memory).
 */

#define CONTEXT_BITS            19
#define ESID_BITS               18
#define ESID_BITS_1T            6

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. The top 4 contexts are used for
 * kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
 * (19 == 37 + 28 - 46).
 */
#define MAX_USER_CONTEXT        ((ASM_CONST(1) << CONTEXT_BITS) - 5)

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus.
 */
#define VSID_MULTIPLIER_256M    ASM_CONST(12538073)     /* 24-bit prime */
#define VSID_BITS_256M          (CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M       ((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T      ASM_CONST(12538073)     /* 24-bit prime */
#define VSID_BITS_1T            (CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T         ((1UL<<VSID_BITS_1T)-1)


#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *      rt = register containing the proto-VSID and into which the
 *              VSID will be stored
 *      rx = scratch register (clobbered)
 *
 *      - rt and rx must be different registers
 *      - The answer will end up in the low VSID_BITS bits of rt.  The higher
 *        bits may contain other garbage, so you may need to mask the
 *        result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)                                 \
        lis     rx,VSID_MULTIPLIER_##size@h;                            \
        ori     rx,rx,VSID_MULTIPLIER_##size@l;                         \
        mulld   rt,rt,rx;               /* rt = rt * MULTIPLIER */      \
                                                                        \
        srdi    rx,rt,VSID_BITS_##size;                                 \
        clrldi  rt,rt,(64-VSID_BITS_##size);                            \
        add     rt,rt,rx;               /* add high and low bits */     \
        /* NOTE: explanation based on VSID_BITS_##size = 36             \
         * Now, r3 == VSID (mod 2^36-1), and lies between 0 and         \
         * 2^36-1+2^28-1.  That in particular means that if r3 >=       \
         * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has     \
         * the bit clear, r3 already has the answer we want, if it      \
         * doesn't, the answer is the low 36 bits of r3+1.  So in all   \
         * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
        addi    rx,rt,1;                                                \
        srdi    rx,rx,VSID_BITS_##size; /* extract 2^VSID_BITS bit */   \
        add     rt,rt,rx

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE  (H_PGTABLE_RANGE >> 41)

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k).  For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
        unsigned long maxaddr;  /* only addresses < this are protected */
        unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
        unsigned int *low_prot[4];
};

#define SBP_L1_BITS             (PAGE_SHIFT - 2)
#define SBP_L2_BITS             (PAGE_SHIFT - 3)
#define SBP_L1_COUNT            (1 << SBP_L1_BITS)
#define SBP_L2_COUNT            (1 << SBP_L2_BITS)
#define SBP_L2_SHIFT            (PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT            (SBP_L2_SHIFT + SBP_L2_BITS)
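
/*
 * Worked example (64K pages, PAGE_SHIFT == 16): SBP_L1_BITS == 14 and
 * SBP_L2_BITS == 13, so SBP_L2_SHIFT == 30 (one leaf page of protection
 * words covers 2^30 == 1GB) and SBP_L3_SHIFT == 43 (one page of pointers
 * covers 2^43 == 8TB), which is why protptrs[] above is sized
 * TASK_SIZE_USER64 >> 43.
 */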

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
        ((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
        ({                                                               \
                unsigned long x;                                         \
                x = (protovsid) * VSID_MULTIPLIER_##size;                \
                x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
                (x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
        })
#endif /* 1 */
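
/*
 * Why the folding above works (illustrative): with n == VSID_BITS,
 * 2^n == 1 (mod 2^n - 1), so the high bits of x can be added into the
 * low n bits instead of taking an expensive modulo. The extra
 * (x + ((x+1) >> n)) step folds in the possible carry, leaving
 * x % (2^n - 1) in the low n bits.
 */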

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
        /* Use 1T segments if possible for addresses >= 1T */
        if (addr >= (1UL << SID_SHIFT_1T))
                return mmu_highuser_ssize;
        return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
                                     int ssize)
{
        /*
         * Bad address. We return VSID 0 for that.
         */
        if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
                return 0;

        if (ssize == MMU_SEGSIZE_256M)
                return vsid_scramble((context << ESID_BITS)
                                     | (ea >> SID_SHIFT), 256M);
        return vsid_scramble((context << ESID_BITS_1T)
                             | (ea >> SID_SHIFT_1T), 1T);
}

/*
 * This is only valid for addresses >= PAGE_OFFSET
 *
 * For kernel space, we use the top 4 context ids to map addresses as below
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
        unsigned long context;

        /*
         * The kernel takes the top 4 contexts from the available range.
         */
        context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
        return get_vsid(context, ea, ssize);
}

unsigned htab_shift_for_mem_size(unsigned long mem_size);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */