linux/arch/powerpc/include/asm/mmu-hash64.h
#ifndef _ASM_POWERPC_MMU_HASH64_H_
#define _ASM_POWERPC_MMU_HASH64_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE, which we
 * need for various slice-related matters. Note that this isn't the
 * complete pgtable.h, but only a portion of it.
 */
#include <asm/pgtable-ppc64.h>
#include <asm/bug.h>
#include <asm/processor.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED          3
#define SLB_CACHE_ENTRIES       8
#define SLB_MIN_SIZE            32

/* Bits in the SLB ESID word */
#define SLB_ESID_V              ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT          12
#define SLB_VSID_SHIFT_1T       24
#define SLB_VSID_SSIZE_SHIFT    62
#define SLB_VSID_B              ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M         ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T           ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS             ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP             ASM_CONST(0x0000000000000400)
#define SLB_VSID_N              ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L              ASM_CONST(0x0000000000000100)
#define SLB_VSID_C              ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP             ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00          ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01          ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10          ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11          ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP            (SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL         (SLB_VSID_KP)
#define SLB_VSID_USER           (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C                 (0x08000000)
#define SLBIE_SSIZE_SHIFT       25
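
/*
 * Illustrative sketch (not part of the original header): how the bits
 * above combine into the VSID dword handed to slbmte, in the style of
 * __mk_vsid_data() in slb.c.  slb_vsid_shift() is defined further down
 * in this file; the flags argument carries bits such as SLB_VSID_KERNEL
 * or SLB_VSID_USER plus the page-size sllp bits.  The helper name here
 * is hypothetical.
 */
#if 0
static inline unsigned long example_mk_vsid_data(unsigned long vsid, int ssize,
                                                 unsigned long flags)
{
        /* VSID in the upper bits, B (segment size) in bits 62-63,
         * protection class and page-size flags in the low 12 bits */
        return (vsid << slb_vsid_shift(ssize)) | flags |
                ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}
#endif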

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT      62
#define HPTE_V_AVPN_SHIFT       7
#define HPTE_V_AVPN             ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x)      (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)     (!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED           ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK             ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE            ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY        ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID            ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0              ASM_CONST(0x8000000000000000)
#define HPTE_R_TS               ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI           ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT        12
#define HPTE_R_RPN              ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP               ASM_CONST(0x0000000000000003)
#define HPTE_R_N                ASM_CONST(0x0000000000000004)
#define HPTE_R_G                ASM_CONST(0x0000000000000008)
#define HPTE_R_M                ASM_CONST(0x0000000000000010)
#define HPTE_R_I                ASM_CONST(0x0000000000000020)
#define HPTE_R_W                ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG             ASM_CONST(0x0000000000000078)
#define HPTE_R_C                ASM_CONST(0x0000000000000080)
#define HPTE_R_R                ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO           ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG          ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK        ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX 0       /* Supervisor read/write, User none */
#define PP_RWRX 1       /* Supervisor read/write, User read */
#define PP_RWRW 2       /* Supervisor read/write, User read/write */
#define PP_RXRX 3       /* Supervisor read,       User read */
#define PP_RXXX (HPTE_R_PP0 | 2)        /* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK   0xc00   /* invalidation selector */
#define  TLBIEL_INVAL_PAGE      0x000   /* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID  0x800   /* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET       0xc00   /* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK   0xfff000        /* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT  12

#define POWER7_TLB_SETS         128     /* # sets in POWER7 TLB */
#define POWER8_TLB_SETS         512     /* # sets in POWER8 TLB */
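
/*
 * Illustrative sketch (not part of the original header): flushing the
 * whole TLB on an architecture 2.06 core by issuing one tlbiel per
 * congruence-class set, in the style of the POWER7/POWER8 cputable
 * flush routines.  The helper name is hypothetical.
 */
#if 0
static inline void example_flush_tlb_206(unsigned int num_sets)
{
        unsigned long rb = TLBIEL_INVAL_SET;    /* invalidate set, all LPIDs */
        unsigned int i;

        asm volatile("ptesync" : : : "memory");
        for (i = 0; i < num_sets; i++) {        /* e.g. POWER7_TLB_SETS */
                asm volatile("tlbiel %0" : : "r" (rb));
                rb += 1 << TLBIEL_INVAL_SET_SHIFT;      /* next set number */
        }
        asm volatile("ptesync" : : : "memory");
}
#endif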

#ifndef __ASSEMBLY__

struct hash_pte {
        __be64 v;
        __be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *            directly to a slbmte "vsid" value
 *    penc  : is the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def
{
        unsigned int    shift;  /* number of bits */
        int             penc[MMU_PAGE_COUNT];   /* HPTE encoding */
        unsigned int    tlbiel; /* tlbiel supported for that page size */
        unsigned long   avpnm;  /* bits to mask out in AVPN in the HPTE */
        unsigned long   sllp;   /* SLB L||LP (exact mask to use in slbmte) */
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

static inline int shift_to_mmu_psize(unsigned int shift)
{
        int psize;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
                if (mmu_psize_defs[psize].shift == shift)
                        return psize;
        return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
        if (mmu_psize_defs[mmu_psize].shift)
                return mmu_psize_defs[mmu_psize].shift;
        BUG();
}
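
/*
 * Illustrative usage (not part of the original header): the two helpers
 * above are inverses for any configured page size, e.g. for 64K pages:
 *
 *      shift_to_mmu_psize(16) == MMU_PAGE_64K   (if 64K pages are set up)
 *      mmu_psize_to_shift(MMU_PAGE_64K) == 16
 *
 * shift_to_mmu_psize() returns -1 for a shift that no configured page
 * size uses, while mmu_psize_to_shift() BUGs on an unconfigured index.
 */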

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M        0
#define MMU_SEGSIZE_1T          1

/*
 * Encoded page number shift.
 * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
 * 12 bits. This enables us to address up to a 76-bit VA.
 * For the HPT hash we can ignore the page-size bits of the VA, and for
 * HPTE encoding we ignore up to 23 bits of the VA. So ignoring the lower
 * 12 bits works in all cases, including the 4k page size.
 */
#define VPN_SHIFT       12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT        12
#define LP_BITS         8
#define LP_MASK(i)      ((0xFF >> (i)) << LP_SHIFT)
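
/*
 * Illustrative sketch (not part of the original header): decoding the LP
 * field of an HPTE's second dword back to the actual page size, by
 * matching it against the penc[] encodings in mmu_psize_defs, in the
 * style of the native hash-table code.  The helper name is hypothetical.
 */
#if 0
static inline int example_lp_to_actual_psize(unsigned int lp, int base_psize)
{
        int i, shift;
        unsigned int mask;

        /* start from 1, since MMU_PAGE_4K has no LP encoding */
        for (i = 1; i < MMU_PAGE_COUNT; i++) {
                if (mmu_psize_defs[base_psize].penc[i] == -1)
                        continue;       /* combination not supported */
                /* only the low (shift - LP_SHIFT) bits of LP carry penc */
                shift = mmu_psize_defs[i].shift - LP_SHIFT;
                if (shift > LP_BITS)
                        shift = LP_BITS;
                mask = (1 << shift) - 1;
                if ((lp & mask) == mmu_psize_defs[base_psize].penc[i])
                        return i;
        }
        return -1;
}
#endif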

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
        if (ssize == MMU_SEGSIZE_256M)
                return SLB_VSID_SHIFT;
        return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
        if (ssize == MMU_SEGSIZE_256M)
                return SID_SHIFT;
        return SID_SHIFT_1T;
}

/*
 * The current system page and segment sizes
 */
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k
 * cache-inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE.  The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
                                             int ssize)
{
        unsigned long v;
        /*
         * The AVA field omits the low-order 23 bits of the 78-bit VA.
         * These bits are not needed in the PTE, because the
         * low-order b of these bits are part of the byte offset
         * into the virtual page and, if b < 23, the high-order
         * 23-b of these bits are always used in selecting the
         * PTEGs to be searched.
         */
        v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
        v <<= HPTE_V_AVPN_SHIFT;
        v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
        return v;
}

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
                                          int actual_psize, int ssize)
{
        unsigned long v;
        v = hpte_encode_avpn(vpn, base_psize, ssize);
        if (actual_psize != MMU_PAGE_4K)
                v |= HPTE_V_LARGE;
        return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
                                          int actual_psize)
{
        /* A 4K page needs no special encoding */
        if (actual_psize == MMU_PAGE_4K)
                return pa & HPTE_R_RPN;
        else {
                unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
                unsigned int shift = mmu_psize_defs[actual_psize].shift;
                return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
        }
}
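
/*
 * Illustrative usage (not part of the original header): an HPTE inserter
 * combines the two encoders above roughly like this, as the native
 * hash-table code does.  vflags/rflags stand for caller-chosen bits such
 * as HPTE_V_BOLTED or the WIMG/PP permission bits:
 *
 *      hpte_v = hpte_encode_v(vpn, base_psize, actual_psize, ssize) |
 *               vflags | HPTE_V_VALID;
 *      hpte_r = hpte_encode_r(pa, base_psize, actual_psize) | rflags;
 */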

/*
 * Build a VPN (the VA shifted right by VPN_SHIFT bits) given VSID,
 * EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
                                    unsigned long vsid, int ssize)
{
        unsigned long mask;
        int s_shift = segment_shift(ssize);

        mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
        return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
                                     unsigned int shift, int ssize)
{
        int mask;
        unsigned long hash, vsid;

        /* VPN_SHIFT can be at most 12 */
        if (ssize == MMU_SEGSIZE_256M) {
                mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
                hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
                        ((vpn & mask) >> (shift - VPN_SHIFT));
        } else {
                mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
                vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
                hash = vsid ^ (vsid << 25) ^
                        ((vpn & mask) >> (shift - VPN_SHIFT));
        }
        return hash & 0x7fffffffffUL;
}
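
/*
 * Illustrative usage (not part of the original header): the hash value
 * selects a PTE group (PTEG) of HPTES_PER_GROUP entries; the secondary
 * group uses the complemented hash, as in hash_utils_64.c:
 *
 *      hash  = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
 *      hpteg = ( hash & htab_hash_mask) * HPTES_PER_GROUP;   (primary)
 *      hpteg = (~hash & htab_hash_mask) * HPTES_PER_GROUP;   (secondary)
 */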

#define HPTE_LOCAL_UPDATE       0x1
#define HPTE_NOHPTE_UPDATE      0x2

extern int __hash_page_4K(unsigned long ea, unsigned long access,
                          unsigned long vsid, pte_t *ptep, unsigned long trap,
                          unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
                           unsigned long vsid, pte_t *ptep, unsigned long trap,
                           unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
                        unsigned long access, unsigned long trap,
                        unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
                     unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                     pte_t *ptep, unsigned long trap, unsigned long flags,
                     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
                           unsigned long vsid, pmd_t *pmdp, unsigned long trap,
                           unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
                                  unsigned long vsid, pmd_t *pmdp,
                                  unsigned long trap, unsigned long flags,
                                  int ssize, unsigned int psize)
{
        BUG();
        return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
                               unsigned long vsid, unsigned long trap,
                               int ssize, int psize, int lpsize,
                               unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                             unsigned long pstart, unsigned long prot,
                             int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
                        int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);

extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from the mmu context id and the effective segment id of the address.
 *
 * For user processes the max context id is limited to ((1ul << 19) - 5);
 * for kernel space, we use the top 4 context ids to map addresses as below.
 * NOTE: each context only supports 64TB now.
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *      VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
 * a bad address. This enables us to consolidate bad address handling in
 * hash_page.
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a proto-VSID of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in the vsid scramble. But the vmemmap
 * (which is what uses region 0xf) will never be close to 64TB in size
 * (it's 56 bytes per page of system memory).
 */

#define CONTEXT_BITS            19
#define ESID_BITS               18
#define ESID_BITS_1T            6

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. The top 4 contexts are used for
 * kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
 * (19 == 37 + 28 - 46).
 */
#define MAX_USER_CONTEXT        ((ASM_CONST(1) << CONTEXT_BITS) - 5)

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus.
 */
#define VSID_MULTIPLIER_256M    ASM_CONST(12538073)     /* 24-bit prime */
#define VSID_BITS_256M          (CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M       ((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T      ASM_CONST(12538073)     /* 24-bit prime */
#define VSID_BITS_1T            (CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T         ((1UL<<VSID_BITS_1T)-1)


#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
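
/*
 * Illustrative check (not part of the original header): why the
 * shift-and-add sequence in ASM_VSID_SCRAMBLE and vsid_scramble()
 * below computes x % (2^n - 1) without a divide.  Writing
 * x = a*2^n + b gives x mod (2^n-1) == (a + b) mod (2^n-1), since
 * 2^n == 1 (mod 2^n-1); one fold plus a conditional fold of the carry
 * then suffices.  A hypothetical userspace self-test:
 */
#if 0
static int vsid_scramble_selftest(void)
{
        unsigned long p, x, folded;

        for (p = 0; p < VSID_MODULUS_256M; p += 104729) {
                x = p * VSID_MULTIPLIER_256M;
                /* one fold: high bits added to the low VSID_BITS_256M bits */
                folded = (x >> VSID_BITS_256M) + (x & VSID_MODULUS_256M);
                /* second fold of the possible carry out of bit n */
                folded = (folded + ((folded + 1) >> VSID_BITS_256M)) &
                                VSID_MODULUS_256M;
                if (folded != x % VSID_MODULUS_256M)
                        return -1;
        }
        return 0;
}
#endif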

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *      rt = register containing the proto-VSID and into which the
 *              VSID will be stored
 *      rx = scratch register (clobbered)
 *
 *      - rt and rx must be different registers
 *      - The answer will end up in the low VSID_BITS bits of rt.  The higher
 *        bits may contain other garbage, so you may need to mask the
 *        result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)                                 \
        lis     rx,VSID_MULTIPLIER_##size@h;                            \
        ori     rx,rx,VSID_MULTIPLIER_##size@l;                         \
        mulld   rt,rt,rx;               /* rt = rt * MULTIPLIER */      \
                                                                        \
        srdi    rx,rt,VSID_BITS_##size;                                 \
        clrldi  rt,rt,(64-VSID_BITS_##size);                            \
        add     rt,rt,rx;               /* add high and low bits */     \
        /* NOTE: explanation based on VSID_BITS_##size = 36             \
         * Now, r3 == VSID (mod 2^36-1), and lies between 0 and         \
         * 2^36-1+2^28-1.  That in particular means that if r3 >=       \
         * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has     \
         * the bit clear, r3 already has the answer we want, if it      \
         * doesn't, the answer is the low 36 bits of r3+1.  So in all   \
         * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
        addi    rx,rt,1;                                                \
        srdi    rx,rx,VSID_BITS_##size; /* extract 2^VSID_BITS bit */   \
        add     rt,rt,rx

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE  (PGTABLE_RANGE >> 41)

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k).  For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
        unsigned long maxaddr;  /* only addresses < this are protected */
        unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
        unsigned int *low_prot[4];
};

#define SBP_L1_BITS             (PAGE_SHIFT - 2)
#define SBP_L2_BITS             (PAGE_SHIFT - 3)
#define SBP_L1_COUNT            (1 << SBP_L1_BITS)
#define SBP_L2_COUNT            (1 << SBP_L2_BITS)
#define SBP_L2_SHIFT            (PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT            (SBP_L2_SHIFT + SBP_L2_BITS)
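
/*
 * Illustrative sketch (not part of the original header): walking the
 * 3-level tree above to find the protection word for an address, in
 * the style of subpage_protection() in hash_utils_64.c.  Error paths
 * are trimmed; the helper name is hypothetical.
 */
#if 0
static inline unsigned int example_spp_lookup(struct subpage_prot_table *spt,
                                              unsigned long ea)
{
        unsigned int **sbpm, *sbpp;

        if (ea >= spt->maxaddr)
                return 0;
        if (ea < 0x100000000UL)
                sbpm = spt->low_prot;   /* first 4GB: shortcut array */
        else
                sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
        if (!sbpm)
                return 0;
        sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
        if (!sbpp)
                return 0;
        /* one 32-bit word of protections per 64k page */
        return sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
}
#endif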

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

typedef unsigned long mm_context_id_t;
struct spinlock;

typedef struct {
        mm_context_id_t id;
        u16 user_psize;         /* page size index */

#ifdef CONFIG_PPC_MM_SLICES
        u64 low_slices_psize;   /* SLB page size encodings */
        unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
#else
        u16 sllp;               /* SLB page size encoding */
#endif
        unsigned long vdso_base;
#ifdef CONFIG_PPC_SUBPAGE_PROT
        struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
#ifdef CONFIG_PPC_ICSWX
        struct spinlock *cop_lockp; /* guard acop and cop_pid */
        unsigned long acop;     /* mask of enabled coprocessor types */
        unsigned int cop_pid;   /* pid value used with coprocessors */
#endif /* CONFIG_PPC_ICSWX */
#ifdef CONFIG_PPC_64K_PAGES
        /* for 4K PTE fragment support */
        void *pte_frag;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
        struct list_head iommu_group_mem_list;
#endif
} mm_context_t;


#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
        ((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
        ({                                                               \
                unsigned long x;                                         \
                x = (protovsid) * VSID_MULTIPLIER_##size;                \
                x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
                (x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
        })
#endif /* 1 */

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
        /* Use 1T segments if possible for addresses >= 1T */
        if (addr >= (1UL << SID_SHIFT_1T))
                return mmu_highuser_ssize;
        return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
                                     int ssize)
{
        /*
         * Bad address. We return VSID 0 for that.
         */
        if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
                return 0;

        if (ssize == MMU_SEGSIZE_256M)
                return vsid_scramble((context << ESID_BITS)
                                     | (ea >> SID_SHIFT), 256M);
        return vsid_scramble((context << ESID_BITS_1T)
                             | (ea >> SID_SHIFT_1T), 1T);
}
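
/*
 * Illustrative usage (not part of the original header): resolving a user
 * effective address to the (vsid, vpn) pair fed into the hash functions
 * above, roughly as hash_page_mm() does:
 *
 *      ssize = user_segment_size(ea);
 *      vsid  = get_vsid(mm->context.id, ea, ssize);
 *      vpn   = hpt_vpn(ea, vsid, ssize);
 */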

/*
 * This is only valid for addresses >= PAGE_OFFSET
 *
 * For kernel space, we use the top 4 context ids to map addresses as below
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
        unsigned long context;

        /*
         * The kernel takes the top 4 contexts from the available range.
         */
        context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
        return get_vsid(context, ea, ssize);
}
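
/*
 * Worked example (not part of the original header): MAX_USER_CONTEXT is
 * 2^19 - 5 = 0x7fffb, so for an address in the 0xc region
 * (ea >> 60 == 0xc) the formula above gives
 * context = 0x7fffb + (0xc - 0xc) + 1 = 0x7fffc, matching the table in
 * the comment; the 0xf region likewise yields 0x7ffff.
 */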
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_MMU_HASH64_H_ */