linux/arch/sh/include/asm/pgtable_32.h
#ifndef __ASM_SH_PGTABLE_32_H
#define __ASM_SH_PGTABLE_32_H

/*
 * Linux PTEL encoding.
 *
 * Hardware and software bit definitions for the PTEL value (see below for
 * notes on SH-X2 MMUs and 64-bit PTEs):
 *
 * - Bits 0 and 7 are reserved on SH-3 (_PAGE_WT and _PAGE_SZ1 on SH-4).
 *
 * - Bit 1 is the SH-bit, but is unused on SH-3 due to an MMU bug (the
 *   hardware PTEL value can't have the SH-bit set when MMUCR.IX is set,
 *   which is the default in cpu-sh3/mmu_context.h:MMU_CONTROL_INIT).
 *
 *   In order to keep this relatively clean, do not use these bits for
 *   defining SH-3 specific flags until all of the other unused bits have
 *   been exhausted.
 *
 * - Bit 9 is reserved by everyone and used by _PAGE_PROTNONE.
 *
 * - Bits 10 and 11 are low bits of the PPN that are reserved on >= 4K pages.
 *   Bit 10 is used for _PAGE_ACCESSED, and bit 11 is used for _PAGE_SPECIAL.
 *
 * - On 29 bit platforms, bits 31 to 29 are used for the space attributes
 *   and timing control which (together with bit 0) are moved into the
 *   old-style PTEA on the parts that support it.
 *
 * XXX: Leave the _PAGE_FILE and _PAGE_WT overhaul for a rainy day.
 *
 * SH-X2 MMUs and extended PTEs
 *
 * SH-X2 supports an extended mode TLB with split data arrays due to the
 * number of bits needed for PR and SZ (now EPR and ESZ) encodings. The PR and
 * SZ bit placeholders still exist in data array 1, but are implemented as
 * reserved bits, with the real logic existing in data array 2.
 *
 * The downside to this is that we can no longer fit everything into a 32-bit
 * PTE encoding, so a 64-bit pte_t is necessary for these parts. On the plus
 * side, this gives us quite a few spare bits to play with for future usage.
 */
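
/*
 * Illustrative sketch (not part of the original header text): with
 * CONFIG_X2TLB the 64-bit pte_t is assumed to be split into two 32-bit
 * halves, roughly:
 *
 *      pte_low  - the legacy PTEL bits defined below (V, PR, SZ, C, D, ...)
 *      pte_high - the extended ESZ/EPR bits, placed there via _PAGE_EXT()
 *
 * so a protection value such as
 *
 *      _PAGE_PRESENT | _PAGE_EXT(_PAGE_EXT_KERN_READ)
 *
 * ends up with _PAGE_PRESENT in pte_low and _PAGE_EXT_KERN_READ in pte_high.
 */
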
/* Legacy and compat mode bits */
#define _PAGE_WT        0x001           /* WT-bit on SH-4, 0 on SH-3 */
#define _PAGE_HW_SHARED 0x002           /* SH-bit  : shared among processes */
#define _PAGE_DIRTY     0x004           /* D-bit   : page changed */
#define _PAGE_CACHABLE  0x008           /* C-bit   : cachable */
#define _PAGE_SZ0       0x010           /* SZ0-bit : Size of page */
#define _PAGE_RW        0x020           /* PR0-bit : write access allowed */
#define _PAGE_USER      0x040           /* PR1-bit : user space access allowed */
#define _PAGE_SZ1       0x080           /* SZ1-bit : Size of page (on SH-4) */
#define _PAGE_PRESENT   0x100           /* V-bit   : page is valid */
#define _PAGE_PROTNONE  0x200           /* software: if not present */
#define _PAGE_ACCESSED  0x400           /* software: page referenced */
#define _PAGE_FILE      _PAGE_WT        /* software: pagecache or swap? */
#define _PAGE_SPECIAL   0x800           /* software: special page */

#define _PAGE_SZ_MASK   (_PAGE_SZ0 | _PAGE_SZ1)
#define _PAGE_PR_MASK   (_PAGE_RW | _PAGE_USER)

/* Extended mode bits */
#define _PAGE_EXT_ESZ0          0x0010  /* ESZ0-bit: Size of page */
#define _PAGE_EXT_ESZ1          0x0020  /* ESZ1-bit: Size of page */
#define _PAGE_EXT_ESZ2          0x0040  /* ESZ2-bit: Size of page */
#define _PAGE_EXT_ESZ3          0x0080  /* ESZ3-bit: Size of page */

#define _PAGE_EXT_USER_EXEC     0x0100  /* EPR0-bit: User space executable */
#define _PAGE_EXT_USER_WRITE    0x0200  /* EPR1-bit: User space writable */
#define _PAGE_EXT_USER_READ     0x0400  /* EPR2-bit: User space readable */

#define _PAGE_EXT_KERN_EXEC     0x0800  /* EPR3-bit: Kernel space executable */
#define _PAGE_EXT_KERN_WRITE    0x1000  /* EPR4-bit: Kernel space writable */
#define _PAGE_EXT_KERN_READ     0x2000  /* EPR5-bit: Kernel space readable */

#define _PAGE_EXT_WIRED         0x4000  /* software: Wire TLB entry */

/* Wrapper for extended mode pgprot twiddling */
#define _PAGE_EXT(x)            ((unsigned long long)(x) << 32)
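
/*
 * Example (illustrative): _PAGE_EXT(_PAGE_EXT_USER_READ) evaluates to
 * 0x0000040000000000ULL, i.e. the extended bits occupy the upper 32 bits
 * of the pgprot value, which pte_modify()/set_pte() below move into
 * pte_high.
 */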

#ifdef CONFIG_X2TLB
#define _PAGE_PCC_MASK  0x00000000      /* No legacy PTEA support */
#else

/* software: moves to PTEA.TC (Timing Control) */
#define _PAGE_PCC_AREA5 0x00000000      /* use BSC registers for area5 */
#define _PAGE_PCC_AREA6 0x80000000      /* use BSC registers for area6 */

/* software: moves to PTEA.SA[2:0] (Space Attributes) */
#define _PAGE_PCC_IODYN 0x00000001      /* IO space, dynamically sized bus */
#define _PAGE_PCC_IO8   0x20000000      /* IO space, 8 bit bus */
#define _PAGE_PCC_IO16  0x20000001      /* IO space, 16 bit bus */
#define _PAGE_PCC_COM8  0x40000000      /* Common Memory space, 8 bit bus */
#define _PAGE_PCC_COM16 0x40000001      /* Common Memory space, 16 bit bus */
#define _PAGE_PCC_ATR8  0x60000000      /* Attribute Memory space, 8 bit bus */
#define _PAGE_PCC_ATR16 0x60000001      /* Attribute Memory space, 16 bit bus */

#define _PAGE_PCC_MASK  0xe0000001

/* Copy the PTEA attributes (TC and SA bits) out of the pgprot value */
static inline unsigned long copy_ptea_attributes(unsigned long x)
{
        return ((x >> 28) & 0xe) | (x & 0x1);
}
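
/*
 * Worked example for copy_ptea_attributes() above (illustrative): for
 * _PAGE_PCC_IO16 (0x20000001) the result is ((0x2) & 0xe) | 0x1 == 0x3,
 * i.e. the 0xe0000001 attribute bits packed into the low four bits.
 */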
#endif

/* Mask which drops unused bits from the PTEL value */
#if defined(CONFIG_CPU_SH3)
#define _PAGE_CLEAR_FLAGS       (_PAGE_PROTNONE | _PAGE_ACCESSED | \
                                 _PAGE_FILE     | _PAGE_SZ1      | \
                                 _PAGE_HW_SHARED)
#elif defined(CONFIG_X2TLB)
/* Get rid of the legacy PR/SZ bits when using extended mode */
#define _PAGE_CLEAR_FLAGS       (_PAGE_PROTNONE | _PAGE_ACCESSED | \
                                 _PAGE_FILE | _PAGE_PR_MASK | _PAGE_SZ_MASK)
#else
#define _PAGE_CLEAR_FLAGS       (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE)
#endif

#define _PAGE_FLAGS_HARDWARE_MASK       (phys_addr_mask() & ~(_PAGE_CLEAR_FLAGS))

/* Hardware flags, page size encoding */
#if !defined(CONFIG_MMU)
# define _PAGE_FLAGS_HARD       0ULL
#elif defined(CONFIG_X2TLB)
# if defined(CONFIG_PAGE_SIZE_4KB)
#  define _PAGE_FLAGS_HARD      _PAGE_EXT(_PAGE_EXT_ESZ0)
# elif defined(CONFIG_PAGE_SIZE_8KB)
#  define _PAGE_FLAGS_HARD      _PAGE_EXT(_PAGE_EXT_ESZ1)
# elif defined(CONFIG_PAGE_SIZE_64KB)
#  define _PAGE_FLAGS_HARD      _PAGE_EXT(_PAGE_EXT_ESZ2)
# endif
#else
# if defined(CONFIG_PAGE_SIZE_4KB)
#  define _PAGE_FLAGS_HARD      _PAGE_SZ0
# elif defined(CONFIG_PAGE_SIZE_64KB)
#  define _PAGE_FLAGS_HARD      _PAGE_SZ1
# endif
#endif
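
/*
 * For example (illustrative), with CONFIG_X2TLB and CONFIG_PAGE_SIZE_4KB
 * the ladder above resolves to _PAGE_FLAGS_HARD == _PAGE_EXT(_PAGE_EXT_ESZ0),
 * i.e. only ESZ0 set in the extended (pte_high) half of the PTE.
 */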

#if defined(CONFIG_X2TLB)
# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#  define _PAGE_SZHUGE  (_PAGE_EXT_ESZ2)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
#  define _PAGE_SZHUGE  (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ2)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#  define _PAGE_SZHUGE  (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ1 | _PAGE_EXT_ESZ2)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#  define _PAGE_SZHUGE  (_PAGE_EXT_ESZ3)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#  define _PAGE_SZHUGE  (_PAGE_EXT_ESZ2 | _PAGE_EXT_ESZ3)
# endif
# define _PAGE_WIRED    (_PAGE_EXT(_PAGE_EXT_WIRED))
#else
# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#  define _PAGE_SZHUGE  (_PAGE_SZ1)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#  define _PAGE_SZHUGE  (_PAGE_SZ0 | _PAGE_SZ1)
# endif
# define _PAGE_WIRED    (0)
#endif

/*
 * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
 * to make pte_mkhuge() happy.
 */
#ifndef _PAGE_SZHUGE
# define _PAGE_SZHUGE   (_PAGE_FLAGS_HARD)
#endif

/*
 * Mask of bits that are to be preserved across pgprot changes.
 */
#define _PAGE_CHG_MASK \
        (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | \
         _PAGE_DIRTY | _PAGE_SPECIAL)

#ifndef __ASSEMBLY__

#if defined(CONFIG_X2TLB) /* SH-X2 TLB */
#define PAGE_NONE       __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
                                 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)

#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
                                 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_KERN_READ  | \
                                           _PAGE_EXT_KERN_WRITE | \
                                           _PAGE_EXT_USER_READ  | \
                                           _PAGE_EXT_USER_WRITE))

#define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
                                 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_KERN_EXEC | \
                                           _PAGE_EXT_KERN_READ | \
                                           _PAGE_EXT_USER_EXEC | \
                                           _PAGE_EXT_USER_READ))

#define PAGE_COPY       PAGE_EXECREAD

#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
                                 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
                                           _PAGE_EXT_USER_READ))

#define PAGE_WRITEONLY  __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
                                 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_KERN_WRITE | \
                                           _PAGE_EXT_USER_WRITE))

#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
                                 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_KERN_WRITE | \
                                           _PAGE_EXT_KERN_READ  | \
                                           _PAGE_EXT_KERN_EXEC  | \
                                           _PAGE_EXT_USER_WRITE | \
                                           _PAGE_EXT_USER_READ  | \
                                           _PAGE_EXT_USER_EXEC))

#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
                                 _PAGE_DIRTY | _PAGE_ACCESSED | \
                                 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
                                           _PAGE_EXT_KERN_WRITE | \
                                           _PAGE_EXT_KERN_EXEC))

#define PAGE_KERNEL_NOCACHE \
                        __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
                                 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
                                 _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
                                           _PAGE_EXT_KERN_WRITE | \
                                           _PAGE_EXT_KERN_EXEC))

#define PAGE_KERNEL_RO  __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
                                 _PAGE_DIRTY | _PAGE_ACCESSED | \
                                 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
                                           _PAGE_EXT_KERN_EXEC))

#define PAGE_KERNEL_PCC(slot, type) \
                        __pgprot(0)

#elif defined(CONFIG_MMU) /* SH-X TLB */
#define PAGE_NONE       __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
                                 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)

#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
                                 _PAGE_CACHABLE | _PAGE_ACCESSED | \
                                 _PAGE_FLAGS_HARD)

#define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
                                 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)

#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
                                 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)

#define PAGE_EXECREAD   PAGE_READONLY
#define PAGE_RWX        PAGE_SHARED
#define PAGE_WRITEONLY  PAGE_SHARED

#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | \
                                 _PAGE_DIRTY | _PAGE_ACCESSED | \
                                 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)

#define PAGE_KERNEL_NOCACHE \
                        __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
                                 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
                                 _PAGE_FLAGS_HARD)

#define PAGE_KERNEL_RO  __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
                                 _PAGE_DIRTY | _PAGE_ACCESSED | \
                                 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)

#define PAGE_KERNEL_PCC(slot, type) \
                        __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
                                 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
                                 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
                                 (type))
#else /* no mmu */
#define PAGE_NONE               __pgprot(0)
#define PAGE_SHARED             __pgprot(0)
#define PAGE_COPY               __pgprot(0)
#define PAGE_EXECREAD           __pgprot(0)
#define PAGE_RWX                __pgprot(0)
#define PAGE_READONLY           __pgprot(0)
#define PAGE_WRITEONLY          __pgprot(0)
#define PAGE_KERNEL             __pgprot(0)
#define PAGE_KERNEL_NOCACHE     __pgprot(0)
#define PAGE_KERNEL_RO          __pgprot(0)

#define PAGE_KERNEL_PCC(slot, type) \
                                __pgprot(0)
#endif

#endif /* __ASSEMBLY__ */

#ifndef __ASSEMBLY__

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#ifdef CONFIG_X2TLB
static inline void set_pte(pte_t *ptep, pte_t pte)
{
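        /*
         * Write the extended half first: the entry only becomes usable by
         * the TLB miss path once pte_low (which carries _PAGE_PRESENT) is
         * written, so ordering the two 32-bit stores with smp_wmb() avoids
         * exposing a half-updated 64-bit PTE.
         */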
        ptep->pte_high = pte.pte_high;
        smp_wmb();
        ptep->pte_low = pte.pte_low;
}
#else
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#endif

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/*
 * (pmds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

#define pfn_pte(pfn, prot) \
        __pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) \
        __pmd(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(x)             (!pte_val(x))
#define pte_present(x)          ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))

#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pmd_none(x)     (!pmd_val(x))
#define pmd_present(x)  (pmd_val(x))
#define pmd_clear(xp)   do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x)      (pmd_val(x) & ~PAGE_MASK)

#define pages_to_mb(x)  ((x) >> (20-PAGE_SHIFT))
#define pte_page(x)     pfn_to_page(pte_pfn(x))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_not_present(pte)    (!((pte).pte_low & _PAGE_PRESENT))
#define pte_dirty(pte)          ((pte).pte_low & _PAGE_DIRTY)
#define pte_young(pte)          ((pte).pte_low & _PAGE_ACCESSED)
#define pte_file(pte)           ((pte).pte_low & _PAGE_FILE)
#define pte_special(pte)        ((pte).pte_low & _PAGE_SPECIAL)

#ifdef CONFIG_X2TLB
#define pte_write(pte) \
        ((pte).pte_high & (_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE))
#else
#define pte_write(pte)          ((pte).pte_low & _PAGE_RW)
#endif

#define PTE_BIT_FUNC(h,fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
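
/*
 * For example, PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY) expands to:
 *
 *      static inline pte_t pte_mkdirty(pte_t pte)
 *      { pte.pte_low |= _PAGE_DIRTY; return pte; }
 */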

#ifdef CONFIG_X2TLB
/*
 * We cheat a bit in the SH-X2 TLB case. As the permission bits are
 * individually toggled (and user permissions are entirely decoupled from
 * kernel permissions), we attempt to couple them a bit more sanely here.
 */
PTE_BIT_FUNC(high, wrprotect, &= ~(_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE));
PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
#else
PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW);
PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW);
PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE);
#endif

PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY);
PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY);
PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
PTE_BIT_FUNC(low, mkspecial, |= _PAGE_SPECIAL);

/*
 * Macros to mark a page protection value as uncachable.
 */
#define pgprot_writecombine(prot) \
        __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)

#define pgprot_noncached         pgprot_writecombine

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
 */
#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))

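/*
 * pte_modify() keeps the page frame and the bits in _PAGE_CHG_MASK and
 * takes the remaining protection bits from the new pgprot; with X2TLB the
 * upper 32 bits of the pgprot value are folded into pte_high.
 */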
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte.pte_low &= _PAGE_CHG_MASK;
        pte.pte_low |= pgprot_val(newprot);

#ifdef CONFIG_X2TLB
        pte.pte_high |= pgprot_val(newprot) >> 32;
#endif

        return pte;
}

#define pmd_page_vaddr(pmd)     ((unsigned long)pmd_val(pmd))
#define pmd_page(pmd)           (virt_to_page(pmd_val(pmd)))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)      (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define __pgd_offset(address)   pgd_index(address)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)   pgd_offset(&init_mm, address)

#define __pud_offset(address)   (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)   (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* Find an entry in the third-level page table.. */
#define pte_index(address)      (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define __pte_offset(address)   pte_index(address)

#define pte_offset_kernel(dir, address) \
        ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address)            pte_offset_kernel(dir, address)
#define pte_unmap(pte)          do { } while (0)

#ifdef CONFIG_X2TLB
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \
               &(e), (e).pte_high, (e).pte_low)
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
#else
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
#endif

/*
 * Encode and decode a swap entry
 *
 * Constraints:
 *      _PAGE_FILE at bit 0
 *      _PAGE_PRESENT at bit 8
 *      _PAGE_PROTNONE at bit 9
 *
 * For the normal case, we encode the swap type into bits 0:7 and the
 * swap offset into bits 10:30. For the 64-bit PTE case, we keep the
 * preserved bits in the low 32 bits and use the upper 32 as the swap
 * offset (along with a 5-bit type), following the same approach as x86
 * PAE. This keeps the logic quite simple, and allows for a full 32
 * PTE_FILE_MAX_BITS, as opposed to the 29 bits we're constrained with
 * in the pte_low case.
 *
 * As is evident from the Alpha code, if we ever get a 64-bit unsigned
 * long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes
 * much cleaner..
 *
 * NOTE: We should set ZEROs at the position of _PAGE_PRESENT
 *       and _PAGE_PROTNONE bits
 */
#ifdef CONFIG_X2TLB
#define __swp_type(x)                   ((x).val & 0x1f)
#define __swp_offset(x)                 ((x).val >> 5)
#define __swp_entry(type, offset)       ((swp_entry_t){ (type) | (offset) << 5 })
#define __pte_to_swp_entry(pte)         ((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)           ((pte_t){ 0, (x).val })

/*
 * Encode and decode a nonlinear file mapping entry
 */
#define pte_to_pgoff(pte)               ((pte).pte_high)
#define pgoff_to_pte(off)               ((pte_t) { _PAGE_FILE, (off) })

#define PTE_FILE_MAX_BITS               32
#else
#define __swp_type(x)                   ((x).val & 0xff)
#define __swp_offset(x)                 ((x).val >> 10)
#define __swp_entry(type, offset)       ((swp_entry_t){ (type) | (offset) << 10 })

#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) >> 1 })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val << 1 })

/*
 * Encode and decode a nonlinear file mapping entry
 */
#define PTE_FILE_MAX_BITS       29
#define pte_to_pgoff(pte)       (pte_val(pte) >> 1)
#define pgoff_to_pte(off)       ((pte_t) { ((off) << 1) | _PAGE_FILE })
#endif
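
/*
 * Worked example (illustrative, legacy 32-bit PTE case): for type 1 and
 * offset 0x100,
 *
 *      __swp_entry(1, 0x100).val   == 1 | (0x100 << 10) == 0x40001
 *      __swp_entry_to_pte(...)     == 0x40001 << 1      == 0x80002
 *      __pte_to_swp_entry(...).val == 0x80002 >> 1      == 0x40001
 *
 * from which __swp_type() and __swp_offset() recover 1 and 0x100. The
 * shift by one keeps bit 0 (_PAGE_FILE) clear so a swap PTE is never
 * mistaken for a nonlinear file PTE.
 */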

#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_PGTABLE_32_H */