linux/arch/sh/include/asm/pgtable_64.h
#ifndef __ASM_SH_PGTABLE_64_H
#define __ASM_SH_PGTABLE_64_H

/*
 * include/asm-sh/pgtable_64.h
 *
 * This file contains the functions and defines necessary to modify and use
 * the SuperH page table tree.
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Error outputs.
 */
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Table setting routines. Used within arch/mm only.
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
        unsigned long long x = ((unsigned long long) pteval.pte_low);
        unsigned long long *xp = (unsigned long long *) pteptr;
        /*
         * Sign-extend based on NPHYS.
         */
        *(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
}
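/*
 * Worked example (illustrative only, assuming NPHYS == 32 so that NPHYS_SIGN
 * selects bit 31 and NPHYS_MASK covers the bits above it; the real values
 * come from the CPU configuration headers):
 *
 *      pteval.pte_low == 0x80001000  ->  bit 31 set, stored as
 *                                        0xffffffff80001000
 *      pteval.pte_low == 0x40001000  ->  bit 31 clear, stored unchanged
 *
 * i.e. the PTE value is sign-extended at the NPHYS boundary before being
 * written into the page table slot.
 */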
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/*
 * PGD defines. Top level.
 */

/* To find an entry in a generic PGD. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address) pgd_index(address)
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/* To find an entry in a kernel PGD. */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define __pud_offset(address)   (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)   (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * PMD level access routines. Same notes as above.
 */
#define _PMD_EMPTY              0x0
/* Either the PMD is empty or present, it's not paged out */
#define pmd_present(pmd_entry)  (pmd_val(pmd_entry) & _PAGE_PRESENT)
#define pmd_clear(pmd_entry_p)  (set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
#define pmd_none(pmd_entry)     (pmd_val((pmd_entry)) == _PMD_EMPTY)
#define pmd_bad(pmd_entry)      ((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_page_vaddr(pmd_entry) \
        ((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))

#define pmd_page(pmd) \
        (virt_to_page(pmd_val(pmd)))

/* PMD to PTE dereferencing */
#define pte_index(address) \
                ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define __pte_offset(address)   pte_index(address)

#define pte_offset_kernel(dir, addr) \
                ((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))

#define pte_offset_map(dir,addr)        pte_offset_kernel(dir, addr)
#define pte_unmap(pte)          do { } while (0)
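/*
 * Example (sketch only, never compiled): how the macros above compose into
 * the usual software page-table walk for a kernel address.  On two-level
 * configurations the pud/pmd levels are folded, in which case pud_offset()
 * and pmd_offset() are supplied by the generic folding headers.  Error
 * handling is reduced to the *_none() checks for brevity.
 */
#if 0
static pte_t *example_lookup_kernel_pte(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);     /* top level, init_mm */
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;
        return pte_offset_kernel(pmd, address); /* leaf PTE */
}
#endif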

#ifndef __ASSEMBLY__
/*
 * PTEL coherent flags.
 * See Chapter 17 ST50 CPU Core Volume 1, Architecture.
 */
/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
   positions, to avoid expensive bit shuffling on every refill.  The remaining
   bits are used for s/w purposes and masked out on each refill.

   Note, the PTE slots are used to hold data of type swp_entry_t when a page is
   swapped out.  Only the _PAGE_PRESENT flag is significant when the page is
   swapped out, and it must be placed so that it doesn't overlap either the
   type or offset fields of swp_entry_t.  For x86, offset is at [31:8] and type
   at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t.  This
   scheme doesn't map to SH-5 because bit [0] controls cacheability.  So bit
   [2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
   into 2 pieces.  That is handled by __swp_entry() and __swp_type() below. */
#define _PAGE_WT        0x001  /* CB0: if cacheable, 1->write-thru, 0->write-back */
#define _PAGE_DEVICE    0x001  /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
#define _PAGE_CACHABLE  0x002  /* CB1: uncachable/cachable */
#define _PAGE_PRESENT   0x004  /* software: page is present */
#define _PAGE_FILE      0x004  /* software: only when !present */
#define _PAGE_SIZE0     0x008  /* SZ0-bit : size of page */
#define _PAGE_SIZE1     0x010  /* SZ1-bit : size of page */
#define _PAGE_SHARED    0x020  /* software: reflects PTEH's SH */
#define _PAGE_READ      0x040  /* PR0-bit : read access allowed */
#define _PAGE_EXECUTE   0x080  /* PR1-bit : execute access allowed */
#define _PAGE_WRITE     0x100  /* PR2-bit : write access allowed */
#define _PAGE_USER      0x200  /* PR3-bit : user space access allowed */
#define _PAGE_DIRTY     0x400  /* software: page has been written to */
#define _PAGE_ACCESSED  0x800  /* software: page referenced */

/* Wrapper for extended mode pgprot twiddling */
#define _PAGE_EXT(x)            ((unsigned long long)(x) << 32)

/*
 * We can use the sign-extended bits in the PTEL to get 32 bits of
 * software flags. This works for now because no implementation uses
 * anything above the PPN field.
 */
#define _PAGE_WIRED     _PAGE_EXT(0x001) /* software: wire the tlb entry */
#define _PAGE_SPECIAL   _PAGE_EXT(0x002)

#define _PAGE_CLEAR_FLAGS       (_PAGE_PRESENT | _PAGE_FILE | _PAGE_SHARED | \
                                 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)

/* Mask which drops software flags */
#define _PAGE_FLAGS_HARDWARE_MASK       (NEFF_MASK & ~(_PAGE_CLEAR_FLAGS))
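/*
 * Worked example (illustrative only): _PAGE_EXT(0x001) == 0x0000000100000000,
 * so _PAGE_WIRED is bit 32 and _PAGE_SPECIAL is bit 33, both above the 32-bit
 * hardware PTEL.  Because _PAGE_WIRED is part of _PAGE_CLEAR_FLAGS, it is
 * stripped by _PAGE_FLAGS_HARDWARE_MASK (NEFF_MASK comes from the CPU
 * headers) together with the purely software PRESENT/FILE/SHARED/DIRTY/
 * ACCESSED bits before a value is handed to the TLB.
 */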

/*
 * HugeTLB support
 */
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define _PAGE_SZHUGE    (_PAGE_SIZE0)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define _PAGE_SZHUGE    (_PAGE_SIZE1)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define _PAGE_SZHUGE    (_PAGE_SIZE0 | _PAGE_SIZE1)
#endif

/*
 * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
 * to make pte_mkhuge() happy.
 */
#ifndef _PAGE_SZHUGE
# define _PAGE_SZHUGE   (0)
#endif

/*
 * Default flags for a Kernel page.
 * This is fundamentally also SHARED because the main use of this define
 * (other than for PGD/PMD entries) is for the VMALLOC pool, which is
 * contextless.
 *
 * _PAGE_EXECUTE is required for modules.
 */
#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                         _PAGE_EXECUTE | \
                         _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
                         _PAGE_SHARED)

/* Default flags for a User page */
#define _PAGE_TABLE     (_KERNPG_TABLE | _PAGE_USER)

#define _PAGE_CHG_MASK  (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
                         _PAGE_SPECIAL)

/*
 * Base flags, common to all of the permission combinations
 * (Read/Write/Execute/Shared) defined below.
 */
#define _PAGE_COMMON    (_PAGE_PRESENT | _PAGE_USER | \
                         _PAGE_CACHABLE | _PAGE_ACCESSED)

#define PAGE_NONE       __pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_SHARED     __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_WRITE | \
                                 _PAGE_SHARED)
#define PAGE_EXECREAD   __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_EXECUTE)

/*
 * We need to include _PAGE_EXECUTE in PAGE_COPY because it is the default
 * protection mode for the stack.
 */
#define PAGE_COPY       PAGE_EXECREAD

#define PAGE_READONLY   __pgprot(_PAGE_COMMON | _PAGE_READ)
#define PAGE_WRITEONLY  __pgprot(_PAGE_COMMON | _PAGE_WRITE)
#define PAGE_RWX        __pgprot(_PAGE_COMMON | _PAGE_READ | \
                                 _PAGE_WRITE | _PAGE_EXECUTE)
#define PAGE_KERNEL     __pgprot(_KERNPG_TABLE)

#define PAGE_KERNEL_NOCACHE \
                        __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                                 _PAGE_EXECUTE | _PAGE_ACCESSED | \
                                 _PAGE_DIRTY | _PAGE_SHARED)

/* Make it a device mapping for maximum safety (e.g. for mapping device
   registers into user-space via /dev/map).  */
#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
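/*
 * Worked example (illustrative only): pgprot_noncached(PAGE_SHARED) clears
 * _PAGE_CACHABLE (0x002) and sets _PAGE_DEVICE (0x001), so the mapping is
 * uncached and treated as a device mapping (no write-combining or reordering
 * at bus level, per the CB0 definition above), while pgprot_writecombine()
 * only clears _PAGE_CACHABLE and leaves CB0 alone.
 */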

/*
 * PTE level access routines.
 *
 * Note 1:
 * This is the tree walk leaf: it holds the physical address to be stored.
 *
 * Note 2:
 * Regarding the choice of _PTE_EMPTY:
 *
 * We must choose a bit pattern that cannot be valid, whether or not the page
 * is present.  bit[2]==1 => present, bit[2]==0 => swapped out.  If swapped
 * out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7] is
 * left for us to select.  If we force bit[7]==0 when swapped out, we could use
 * the combination bit[7,2]=2'b10 to indicate an empty PTE.  Alternatively, if
 * we force bit[7]==1 when swapped out, we can use all zeroes to indicate
 * empty.  This is convenient, because the page tables get cleared to zero
 * when they are allocated.
 */
#define _PTE_EMPTY      0x0
#define pte_present(x)  (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp)   (set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
#define pte_none(x)     (pte_val(x) == _PTE_EMPTY)

/*
 * Some definitions to translate between mem_map, PTEs, and page
 * addresses:
 */

/*
 * Given a PTE, return the index of the mem_map[] entry corresponding to
 * the page frame referenced by the PTE: take the absolute physical address,
 * make it relative to __MEMORY_START and translate that into an index.
 */
#define pte_pagenr(x)           (((unsigned long) (pte_val(x)) - \
                                 __MEMORY_START) >> PAGE_SHIFT)

/*
 * Given a PTE, return the "struct page *".
 */
#define pte_page(x)             (mem_map + pte_pagenr(x))

/*
 * Return the number of (rounded-down) MB corresponding to x pages.
 */
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
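/*
 * Worked example (illustrative only): with 4 KiB pages (PAGE_SHIFT == 12)
 * this is x >> 8, so 25600 pages -> 100 MB, and anything under 256 pages
 * rounds down to 0 MB.
 */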

/*
 * The following only have defined behavior if pte_present() is true.
 */
static inline int pte_dirty(pte_t pte)  { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)  { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)   { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_write(pte_t pte)  { return pte_val(pte) & _PAGE_WRITE; }
/* _PAGE_SPECIAL sits above bit 31; collapse to 0/1 so the int return doesn't truncate it */
static inline int pte_special(pte_t pte){ return !!(pte_val(pte) & _PAGE_SPECIAL); }

static inline pte_t pte_wrprotect(pte_t pte)    { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
static inline pte_t pte_mkclean(pte_t pte)      { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkold(pte_t pte)        { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkwrite(pte_t pte)      { set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
static inline pte_t pte_mkdirty(pte_t pte)      { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte)      { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkhuge(pte_t pte)       { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
static inline pte_t pte_mkspecial(pte_t pte)    { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SPECIAL)); return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry.
 *
 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
 */
#define mk_pte(page,pgprot)                                                     \
({                                                                              \
        pte_t __pte;                                                            \
                                                                                \
        set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) |                \
                __MEMORY_START | pgprot_val((pgprot))));                        \
        __pte;                                                                  \
})
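/*
 * Sketch of the round trip (illustrative only, assuming __MEMORY_START is
 * page aligned and all pgprot bits lie below PAGE_SHIFT):
 *
 *      pte_val(mk_pte(page, prot))    == (((page) - mem_map) << PAGE_SHIFT)
 *                                        | __MEMORY_START | pgprot_val(prot)
 *      pte_pagenr(mk_pte(page, prot)) == page - mem_map
 *      pte_page(mk_pte(page, prot))   == page
 *
 * The protection bits vanish in pte_pagenr()'s >> PAGE_SHIFT and the
 * __MEMORY_START offset is subtracted back out, so pte_page() recovers the
 * original struct page pointer.
 */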

/*
 * This takes an (absolute) physical page address that is used
 * by the remapping functions.
 */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }

/* Encode and decode a swap entry */
#define __swp_type(x)                   (((x).val & 3) + (((x).val >> 1) & 0x3c))
#define __swp_offset(x)                 ((x).val >> 8)
#define __swp_entry(type, offset)       ((swp_entry_t) { ((offset << 8) + ((type & 0x3c) << 1) + (type & 3)) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })
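/*
 * Worked example (illustrative only): __swp_entry(0x2b, 0x1234) builds
 *
 *      (0x1234 << 8)       = 0x123400   offset in bits [31:8]
 *      (0x2b & 0x3c) << 1  = 0x000050   high type bits, placed in [6:3]
 *      (0x2b & 3)          = 0x000003   low type bits in [1:0]
 *                            --------
 *      .val                = 0x123453
 *
 * Bit 2 (_PAGE_PRESENT) stays clear, so the entry is never mistaken for a
 * present PTE.  Decoding reverses the split: __swp_type() gives
 * (0x123453 & 3) + ((0x123453 >> 1) & 0x3c) == 0x2b, and __swp_offset()
 * gives 0x123453 >> 8 == 0x1234.
 */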

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS               29
#define pte_to_pgoff(pte)               (pte_val(pte))
#define pgoff_to_pte(off)               ((pte_t) { (off) | _PAGE_FILE })

#endif /* !__ASSEMBLY__ */

#define pfn_pte(pfn, prot)      __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)      __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

#endif /* __ASM_SH_PGTABLE_64_H */