/* linux/arch/mips/include/asm/pgtable-64.h */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#ifdef CONFIG_PAGE_SIZE_64KB
#include <asm-generic/pgtable-nopmd.h>
#else
#include <asm-generic/pgtable-nopud.h>
#endif

/*
 * Each address space has two 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
 * tables. Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, and each pte is initialized to 0. When memory is low
 * and a pmd table or a page table allocation fails, empty_bad_pmd_table
 * and empty_bad_page_table are returned to the higher-layer code, so
 * that the failure is recognized later on. Linux does not seem to
 * handle these failures very well, though. The empty_bad_page_table has
 * invalid pte entries in it, to force page faults.
 *
 * Kernel mappings: kernel mappings are held in the swapper_pg_table.
 * The layout is identical to userspace except it's indexed with the
 * fault address - VMALLOC_START.
 */
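
/*
 * A worked example of the above, assuming the 4kB page size
 * configuration (PAGE_SIZE == 4096, PGD_ORDER == 1, PMD_ORDER ==
 * PTE_ORDER == 0; see the order definitions below):
 *
 *      PTRS_PER_PGD = (4096 << 1) / 8 = 1024
 *      PTRS_PER_PMD = (4096 << 0) / 8 =  512
 *      PTRS_PER_PTE = (4096 << 0) / 8 =  512
 *
 * so a fully populated tree maps 1024 * 512 * 512 * 4096 = 2^40 bytes.
 */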


/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
#define PGDIR_SHIFT     (PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
#else

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))


#define PGDIR_SHIFT     (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#endif
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
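
/*
 * Worked example, again assuming the 4kB page size configuration
 * (PAGE_SHIFT == 12, PTE_ORDER == PMD_ORDER == 0; the "- 3" is log2
 * of the 8-byte table entry size):
 *
 *      PMD_SHIFT   = 12 + (12 + 0 - 3) = 21   -> a pmd entry maps 2MB
 *      PGDIR_SHIFT = 21 + (12 + 0 - 3) = 30   -> a pgd entry maps 1GB
 */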

/*
 * For 4kB page size we use a 3 level page tree and an 8kB pgd, which
 * permits mapping 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that seemed
 * rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space.  Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB page size we use a 3 level page tree which permits a total of
 * 47 bits of virtual address space.
 *
 * For 64kB page size we use a 2 level page table tree for a total of 42 bits
 * of virtual address space.
 */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PGD_ORDER               1
#define PUD_ORDER               aieeee_attempt_to_allocate_pud
#define PMD_ORDER               0
#define PTE_ORDER               0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER               0
#define PUD_ORDER               aieeee_attempt_to_allocate_pud
#define PMD_ORDER               0
#define PTE_ORDER               0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PGD_ORDER               0
#define PUD_ORDER               aieeee_attempt_to_allocate_pud
#define PMD_ORDER               0
#define PTE_ORDER               0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_ORDER               0
#define PUD_ORDER               aieeee_attempt_to_allocate_pud
#define PMD_ORDER               0
#define PTE_ORDER               0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER               0
#define PUD_ORDER               aieeee_attempt_to_allocate_pud
#define PMD_ORDER               aieeee_attempt_to_allocate_pmd
#define PTE_ORDER               0
#endif

#define PTRS_PER_PGD    ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD    ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
#define PTRS_PER_PTE    ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
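
/*
 * Sanity check of the address-space sizes quoted further up, using the
 * PTRS_PER_* formulas above:
 *
 *       8kB pages, 3 levels: 2^10 * 2^10 * 2^10 * 2^13 = 2^43 (8TB)
 *      16kB pages, 3 levels: 2^11 * 2^11 * 2^11 * 2^14 = 2^47
 *      64kB pages, 2 levels: 2^13 * 2^13 * 2^16        = 2^42
 */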

#if PGDIR_SIZE >= TASK_SIZE64
#define USER_PTRS_PER_PGD       (1)
#else
#define USER_PTRS_PER_PGD       (TASK_SIZE64 / PGDIR_SIZE)
#endif
#define FIRST_USER_ADDRESS      0UL

/*
 * TLB refill handlers also map the vmalloc area into xuseg.  Avoid
 * the first couple of pages so NULL pointer dereferences will still
 * reliably trap.
 */
#define VMALLOC_START           (MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END     \
        (MAP_BASE + \
         min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
             (1UL << cpu_vmbits)) - (1UL << 32))
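
/*
 * For instance, with the 4kB configuration and assuming a CPU with
 * cpu_vmbits == 40, the min() above selects 2^40 and the vmalloc
 * arena spans [MAP_BASE + 2 * PAGE_SIZE, MAP_BASE + 2^40 - 2^32).
 */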

#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
        VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULE_START    CKSSEG
#define MODULE_END      (FIXADDR_START-2*PAGE_SIZE)
#endif

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];


#ifndef __PAGETABLE_PMD_FOLDED
/*
 * For 3-level pagetables we define these ourselves; for 2-level the
 * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)      ((x).pmd)
#define __pmd(x)        ((pmd_t) { (x) } )


extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
extern pmd_t empty_bad_pmd_table[PTRS_PER_PMD];
#endif

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

#define pmd_bad(pmd)            (pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
        pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
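
/*
 * Illustrative invariants implied by the definitions above: for any
 * pmd_t *pmdp,
 *
 *      pmd_clear(pmdp);
 *      pmd_none(*pmdp);        -> true, entry points at invalid_pte_table
 *      pmd_present(*pmdp);     -> false
 */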
#ifndef __PAGETABLE_PMD_FOLDED

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
        return pud_val(pud) == (unsigned long) invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
        return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
        return pud_val(pud) != (unsigned long) invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
        pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
#endif

#define pte_page(x)             pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)              ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)      __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)              ((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)      __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
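
/*
 * Sketch of the non-VR41xx encoding: pfn_pte() places the pfn at
 * _PFN_SHIFT and ors in the protection bits, and pte_pfn() shifts it
 * back out, so (assuming pgprot_val(prot) only sets bits below
 * _PFN_SHIFT and the shifted pfn does not overflow):
 *
 *      pte_pfn(pfn_pte(pfn, prot)) == pfn
 *
 * VR41xx parts keep the pfn at the fixed position PAGE_SHIFT + 2.
 */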

#define __pgd_offset(address)   pgd_index(address)
#define __pud_offset(address)   (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)   pmd_index(address)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pgd_index(address)      (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pmd_index(address)      (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)    ((mm)->pgd + pgd_index(addr))

#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long pud_page_vaddr(pud_t pud)
{
        return pud_val(pud);
}
#define pud_phys(pud)           virt_to_phys((void *)pud_val(pud))
#define pud_page(pud)           (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
{
        return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
}
#endif

/* Find an entry in the third-level page table.. */
#define __pte_offset(address)                                           \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)                                        \
        ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)                                 \
        ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address)                                    \
        ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
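
/*
 * A minimal sketch, compiled out, of how the accessors above compose
 * into a full software walk from a virtual address to its pte; the
 * function name is hypothetical and not part of this header:
 */
#if 0
static inline pte_t *example_walk(struct mm_struct *mm, unsigned long address)
{
        pgd_t *pgd = pgd_offset(mm, address);   /* first level */
        pud_t *pud = pud_offset(pgd, address);  /* no-op: pud is always folded here */
        pmd_t *pmd = pmd_offset(pud, address);  /* second level, or folded for 64kB pages */

        return pte_offset_kernel(pmd, address); /* third level */
}
#endif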

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);

/*
 * Non-present pages:  high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
        pte_t pte;

        pte_val(pte) = (type << 32) | (offset << 40);
        return pte;
}

#define __swp_type(x)           (((x).val >> 32) & 0xff)
#define __swp_offset(x)         ((x).val >> 40)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
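
/*
 * Worked round trip of the encoding above, with the type in bits
 * 39..32 and the offset in bits 63..40 (so type < 2^8, offset < 2^24):
 *
 *      __swp_type(__swp_entry(type, offset))   == type
 *      __swp_offset(__swp_entry(type, offset)) == offset
 */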

/*
 * Bits 0, 4, 6, and 7 are taken. Let's leave bits 1, 2, 3, and 5 alone to
 * make things easier, and only use the upper 56 bits for the page offset...
 */
#define PTE_FILE_MAX_BITS       56

#define pte_to_pgoff(_pte)      ((_pte).pte >> 8)
#define pgoff_to_pte(off)       ((pte_t) { ((off) << 8) | _PAGE_FILE })
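
/*
 * Likewise, pte_to_pgoff()/pgoff_to_pte() round trip for any offset
 * below 2^56, assuming _PAGE_FILE lives among the taken low-order
 * bits mentioned above.
 */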

#endif /* _ASM_PGTABLE_64_H */