linux/arch/alpha/include/asm/pgtable.h
#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

#include <asm-generic/4level-fixup.h>

/*
 * This file contains the functions and defines necessary to modify and use
 * the Alpha page table tree.
 *
 * This hopefully works with any standard Alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */
#include <linux/mmzone.h>

#include <asm/page.h>
#include <asm/processor.h>	/* For TASK_SIZE */
#include <asm/machvec.h>

struct mm_struct;
struct vm_area_struct;

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
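
/*
 * A worked example, assuming the usual 8KB pages (PAGE_SHIFT == 13):
 *
 *	PMD_SHIFT   = 13 + (13-3)   = 23  ->  one PMD entry maps 8MB
 *	PGDIR_SHIFT = 13 + 2*(13-3) = 33  ->  one PGD entry maps 8GB
 *
 * and since each level is itself one 8KB page of 1024 entries, a
 * single PGD can map 1024 * 8GB = 2^43 bytes of virtual space.
 */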

/*
 * Entries per page directory level:  the Alpha is three-level, with
 * all levels having a one-page page table.
 */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD	(1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

/* Number of pointers that fit on a page:  this will go away. */
#define PTRS_PER_PAGE	(1UL << (PAGE_SHIFT-3))

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
#define VMALLOC_START		0xfffffe0000000000
#else
#define VMALLOC_START		(-2*PGDIR_SIZE)
#endif
#define VMALLOC_END		(-PGDIR_SIZE)
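
/*
 * Worked values, again assuming 8KB pages (PGDIR_SIZE == 2^33 == 8GB)
 * and two's-complement 64-bit arithmetic:
 *
 *	VMALLOC_START = -2*2^33 = 0xfffffffc00000000
 *	VMALLOC_END   =   -2^33 = 0xfffffffe00000000
 *
 * i.e. in the non-LARGE_VMALLOC case, vmalloc space is exactly one
 * 8GB PGD slot, the second one from the top of the address space.
 */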

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID	0x0001
#define _PAGE_FOR	0x0002	/* used for page protection (fault on read) */
#define _PAGE_FOW	0x0004	/* used for page protection (fault on write) */
#define _PAGE_FOE	0x0008	/* used for page protection (fault on exec) */
#define _PAGE_ASM	0x0010
#define _PAGE_KRE	0x0100	/* xxx - see below on the "accessed" bit */
#define _PAGE_URE	0x0200	/* xxx */
#define _PAGE_KWE	0x1000	/* used to do the dirty bit in software */
#define _PAGE_UWE	0x2000	/* used to do the dirty bit in software */

/* .. and these are ours ... */
#define _PAGE_DIRTY	0x20000
#define _PAGE_ACCESSED	0x40000
#define _PAGE_FILE	0x80000	/* set:pagecache, unset:swap */

/*
 * NOTE! The "accessed" bit isn't necessarily exact:  it can be kept exactly
 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
 * the KRE/URE bits to watch for it. That way we don't need to overload the
 * KWE/UWE bits with both handling dirty and accessed.
 *
 * Note that the kernel uses the accessed bit just to check whether to page
 * out a page or not, so it doesn't have to be exact anyway.
 */

#define __DIRTY_BITS	(_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)

#define _PFN_MASK	0xFFFFFFFF00000000UL

#define _PAGE_TABLE	(_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK	(_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)
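
/*
 * _PAGE_CHG_MASK is exactly what pte_modify() (below) preserves
 * across a protection change: the pfn plus the software
 * dirty/accessed state. Everything else comes from the new pgprot.
 * Illustrative only:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *		-- same pfn, dirty/accessed intact, FOW now set
 */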

/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed. They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The hardware can handle write-only mappings, but as the Alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-without-read privs.
 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
 * arch/alpha/mm/fault.c).
 */
	/* xwr */
#define __P000	_PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001	_PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010	_PAGE_P(_PAGE_FOE)
#define __P011	_PAGE_P(_PAGE_FOE)
#define __P100	_PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101	_PAGE_P(_PAGE_FOW)
#define __P110	_PAGE_P(0)
#define __P111	_PAGE_P(0)

#define __S000	_PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001	_PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010	_PAGE_S(_PAGE_FOE)
#define __S011	_PAGE_S(_PAGE_FOE)
#define __S100	_PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101	_PAGE_S(_PAGE_FOW)
#define __S110	_PAGE_S(0)
#define __S111	_PAGE_S(0)
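
/*
 * Example of the write-implies-read rule above: a PROT_WRITE-only
 * private mapping indexes __P010, which sets only _PAGE_FOE, so in
 * practice the page is readable and writable, merely not executable.
 * The index itself is computed by the generic mm code, roughly (a
 * sketch of generic behaviour, not something defined in this file):
 *
 *	prot = protection_map[vm_flags &
 *			      (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
 */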

/*
 * pgprot_noncached() is only for InfiniBand PCI support, and a real
 * implementation for RAM would be more complicated.
 */
#define pgprot_noncached(prot)	(prot)

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero:  used
 * for zero-mapped memory areas etc.
 */
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE(vaddr)	(virt_to_page(ZERO_PGE))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR			(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK			(~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2			3

/* to find an entry in a page-table */
#define PAGE_PTR(address)		\
  ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/*
 * On certain platforms whose physical address space can overlap KSEG,
 * namely EV6 and above, we must re-twiddle the physaddr to restore the
 * correct high-order bits.
 *
 * This is extremely confusing until you realize that this is actually
 * just working around a userspace bug.  The X server was intending to
 * provide the physical address but instead provided the KSEG address.
 * Or tried to, except it's not representable.
 *
 * On Tsunami there's nothing meaningful at 0x40000000000, so this is
 * a safe thing to do.  Come the first core logic that does put something
 * in this area -- memory or what have you -- then this hack will have
 * to go away.  So be prepared!
 */

#if defined(CONFIG_ALPHA_GENERIC) && defined(USE_48_BIT_KSEG)
#error "EV6-only feature in a generic kernel"
#endif
#if defined(CONFIG_ALPHA_GENERIC) || \
    (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
#define KSEG_PFN	(0xc0000000000UL >> PAGE_SHIFT)
#define PHYS_TWIDDLE(pfn) \
  ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
  ? ((pfn) ^= KSEG_PFN) : (pfn))
#else
#define PHYS_TWIDDLE(pfn) (pfn)
#endif
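
/*
 * Mechanically, KSEG_PFN covers physical-address bits 42 and 43.
 * If an incoming pfn has bit 42 set and bit 43 clear -- the
 * 0x40000000000 pattern the buggy KSEG-derived address produces --
 * the XOR flips both bits, e.g.:
 *
 *	PHYS_TWIDDLE(0x40000000000UL >> PAGE_SHIFT)
 *		== 0x80000000000UL >> PAGE_SHIFT
 *
 * Any other pfn passes through untouched.
 */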

/*
 * Conversion functions:  convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#ifndef CONFIG_DISCONTIGMEM
#define page_to_pa(page)	(((page) - mem_map) << PAGE_SHIFT)

#define pte_pfn(pte)	(pte_val(pte) >> 32)
#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
#define mk_pte(page, pgprot)						\
({									\
	pte_t pte;							\
									\
	pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot);	\
	pte;								\
})
#endif
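
/*
 * Since the pfn occupies the upper 32 bits and the protection bits
 * all sit below bit 32, mk_pte() and pte_pfn()/pte_page() are exact
 * inverses. A minimal sketch, assuming a valid struct page *page:
 *
 *	pte_t pte = mk_pte(page, PAGE_KERNEL);
 *	BUG_ON(pte_pfn(pte) != page_to_pfn(page));
 *	BUG_ON(pte_page(pte) != page);
 */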

extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
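
/*
 * pmd_set()/pgd_set() store the pfn of the next-level table in the
 * upper 32 bits: subtracting PAGE_OFFSET turns the kernel virtual
 * pointer into a (page-aligned) physical address, and shifting left
 * by (32 - PAGE_SHIFT) is then the same as
 *
 *	((ptr - PAGE_OFFSET) >> PAGE_SHIFT) << 32  ==  pfn << 32
 *
 * pmd_page_vaddr()/pgd_page_vaddr() below simply run this backwards.
 */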

extern inline unsigned long
pmd_page_vaddr(pmd_t pmd)
{
	return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
}

#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd)	(mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32))
#define pgd_page(pgd)	(mem_map + ((pgd_val(pgd) & _PFN_MASK) >> 32))
#endif

extern inline unsigned long pgd_page_vaddr(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = 0;
}

extern inline int pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)		{ return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pmd_present(pmd_t pmd)	{ return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp)	{ pmd_val(*pmdp) = 0; }

extern inline int pgd_none(pgd_t pgd)		{ return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)		{ return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pgd_present(pgd_t pgd)	{ return pgd_val(pgd) & _PAGE_VALID; }
extern inline void pgd_clear(pgd_t * pgdp)	{ pgd_val(*pgdp) = 0; }
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
extern inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
extern inline int pte_special(pte_t pte)	{ return 0; }

extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= __ACCESS_BITS; return pte; }
extern inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
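
/*
 * This is where the software dirty/accessed emulation comes
 * together. A rough sketch of the life cycle (the fault-side logic
 * lives in the PALcode dispatch and arch/alpha/mm/fault.c, not here):
 *
 *	pte = pte_mkclean(pte);		clears _PAGE_DIRTY plus the
 *					KWE/UWE write-enable bits, so
 *					the next store traps
 *	... store traps, fault handler runs ...
 *	pte = pte_mkdirty(pte);		dirty again; stores sail through
 *
 * pte_mkold()/pte_mkyoung() play the same game with KRE/URE to
 * emulate the accessed bit, as described near the top of this file.
 */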

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address)	((mm)->pgd+pgd_index(address))

/*
 * The smp_read_barrier_depends() in the following functions are required to
 * order the load of *dir (the pointer in the top level page table) with any
 * subsequent load of the returned pmd_t *ret (ret is data dependent on *dir).
 *
 * If this ordering is not enforced, the CPU might load an older value of
 * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
 * more details.
 *
 * Note that we never change the mm->pgd pointer after the task is running, so
 * pgd_offset does not require such a barrier.
 */

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	pmd_t *ret = (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
	smp_read_barrier_depends(); /* see above */
	return ret;
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
{
	pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
		+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
	smp_read_barrier_depends(); /* see above */
	return ret;
}
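
/*
 * Putting the three levels together, a minimal sketch of a software
 * walk for a kernel address (illustrative only; real callers also
 * check pgd_none()/pmd_none()/p*_bad() before descending):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *	if (pte_present(*pte))
 *		pfn = pte_pfn(*pte);
 */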

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir),(addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

extern pgd_t swapper_pg_dir[1024];

/*
 * The Alpha doesn't have any external MMU info:  the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t pte)
{
}

/*
 * Non-present pages:  high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define __swp_type(x)		(((x).val >> 32) & 0xff)
#define __swp_offset(x)		((x).val >> 40)
#define __swp_entry(type, off)	((swp_entry_t) { pte_val(mk_swap_pte((type), (off))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define pte_to_pgoff(pte)	(pte_val(pte) >> 32)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 32) | _PAGE_FILE })

#define PTE_FILE_MAX_BITS	32
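
/*
 * Nonlinear file ptes use the same upper-32-bit trick: the page
 * offset sits above bit 32 while the _PAGE_FILE tag sits below it,
 * so the round trip is exact for any offset up to PTE_FILE_MAX_BITS:
 *
 *	pte_to_pgoff(pgoff_to_pte(off)) == off	(off < 1UL << 32)
 */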

#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr)	(1)
#endif

#define io_remap_pfn_range(vma, start, pfn, size, prot) \
		remap_pfn_range(vma, start, pfn, size, prot)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void paging_init(void);

#include <asm-generic/pgtable.h>

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT.  */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* _ALPHA_PGTABLE_H */