linux/arch/mips/include/asm/pgtable-64.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/compiler.h>
#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#ifdef CONFIG_PAGE_SIZE_64KB
#include <asm-generic/pgtable-nopmd.h>
#else
#include <asm-generic/pgtable-nopud.h>
#endif

/*
 * Each address space has two 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8-byte pointers to pmd tables. Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8-byte pointers to page
 * tables. Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8-byte ptes. Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, and each pte is initialized to 0. When memory is low
 * and a pmd table or a page table allocation fails, empty_bad_pmd_table
 * and empty_bad_page_table are returned to the higher layer code, so
 * that the failure is recognized later on. Linux does not seem to
 * handle these failures very well, though. The empty_bad_page_table has
 * invalid pte entries in it, to force page faults.
 *
 * Kernel mappings: kernel mappings are held in swapper_pg_dir.
 * The layout is identical to userspace except it's indexed with the
 * fault address - VMALLOC_START.
 */
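
/*
 * As an illustration (assuming the 4kB page size configuration below),
 * a 40-bit user virtual address decomposes as:
 *
 *  +-------------+-------------+-------------+-------------+
 *  |  bits 39:30 |  bits 29:21 |  bits 20:12 |  bits 11:0  |
 *  |  pgd index  |  pmd index  |  pte index  | page offset |
 *  +-------------+-------------+-------------+-------------+
 */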

/* PGDIR_SHIFT determines the size of the area a top-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
#define PGDIR_SHIFT     (PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
#else

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

#define PGDIR_SHIFT     (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#endif
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * For 4kB page size we use a 3 level page tree and an 8kB pgd, which
 * permits mapping 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that seemed
 * rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space.  Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB page size we use a 3 level page tree which permits a total of
 * 47 bits of virtual address space; 32kB pages likewise use a 3 level
 * tree, for a total of 51 bits.
 *
 * For 64kB page size we use a 2 level page table tree for a total of 42 bits
 * of virtual address space.
 */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PGD_ORDER               1
#define PUD_ORDER               aieeee_attempt_to_allocate_pud
#define PMD_ORDER               0
#define PTE_ORDER               0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER               0
#define PUD_ORDER               aieeee_attempt_to_allocate_pud
#define PMD_ORDER               0
#define PTE_ORDER               0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PGD_ORDER               0
#define PUD_ORDER               aieeee_attempt_to_allocate_pud
#define PMD_ORDER               0
#define PTE_ORDER               0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_ORDER               0
#define PUD_ORDER               aieeee_attempt_to_allocate_pud
#define PMD_ORDER               0
#define PTE_ORDER               0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER               0
#define PUD_ORDER               aieeee_attempt_to_allocate_pud
#define PMD_ORDER               aieeee_attempt_to_allocate_pmd
#define PTE_ORDER               0
#endif

#define PTRS_PER_PGD    ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD    ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
#define PTRS_PER_PTE    ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

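/*
 * As a worked example, for CONFIG_PAGE_SIZE_4KB (PAGE_SHIFT == 12,
 * PGD_ORDER == 1, PMD_ORDER == PTE_ORDER == 0) the above works out to:
 *
 *      PMD_SHIFT    = 12 + (12 + 0 - 3)     = 21  (a pmd entry maps 2MB)
 *      PGDIR_SHIFT  = 21 + (12 + 0 - 3)     = 30  (a pgd entry maps 1GB)
 *      PTRS_PER_PGD = (4K << 1) / 8         = 1024
 *      PTRS_PER_PMD = PTRS_PER_PTE = 4K / 8 = 512
 *
 * 1024 pgd entries of 1GB each give the 40 bits of virtual address
 * space mentioned above.
 */
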
#if PGDIR_SIZE >= TASK_SIZE64
#define USER_PTRS_PER_PGD       (1)
#else
#define USER_PTRS_PER_PGD       (TASK_SIZE64 / PGDIR_SIZE)
#endif
#define FIRST_USER_ADDRESS      0UL

/*
 * TLB refill handlers also map the vmalloc area into xuseg.  Avoid
 * the first couple of pages so NULL pointer dereferences will still
 * reliably trap.
 */
#define VMALLOC_START           (MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END     \
        (MAP_BASE + \
         min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
             (1UL << cpu_vmbits)) - (1UL << 32))

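/*
 * With 4K pages, for instance, the three table levels can map
 * 1024 * 512 * 512 * 4K = 2^40 bytes, so on a CPU with cpu_vmbits == 40
 * the vmalloc area ends 2^32 bytes below MAP_BASE + 2^40.
 */
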
#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
        VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULE_START    CKSSEG
#define MODULE_END      (FIXADDR_START-2*PAGE_SIZE)
#endif

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * For 3-level pagetables we define these ourselves, for 2-level the
 * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)      ((x).pmd)
#define __pmd(x)        ((pmd_t) { (x) } )

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
#endif

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /* pmd_huge(pmd) but inline */
        if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
                return 0;
#endif

        if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
                return 1;

        return 0;
}

static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
        pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
        return pud_val(pud) == (unsigned long) invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
        return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
        return pud_val(pud) != (unsigned long) invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
        pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
#endif

#define pte_page(x)             pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)              ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)      __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)              ((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)      __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)      __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
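
/*
 * A minimal usage sketch (kva is assumed to be a kernel virtual address
 * and PAGE_KERNEL the usual kernel page protection): the pte for the
 * page backing kva could be built as
 *
 *      pte_t pte = pfn_pte(virt_to_phys(kva) >> PAGE_SHIFT, PAGE_KERNEL);
 */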

#define __pgd_offset(address)   pgd_index(address)
#define __pud_offset(address)   (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)   pmd_index(address)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pgd_index(address)      (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pmd_index(address)      (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)    ((mm)->pgd + pgd_index(addr))

#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long pud_page_vaddr(pud_t pud)
{
        return pud_val(pud);
}
#define pud_phys(pud)           virt_to_phys((void *)pud_val(pud))
#define pud_page(pud)           (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
}
#endif

/* Find an entry in the third-level page table. */
#define __pte_offset(address)                                           \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)                                        \
        ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)                                 \
        ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address)                                    \
        ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))

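/*
 * A minimal sketch of how the lookup helpers above compose into a
 * software page table walk for an address in mm (3-level configuration
 * assumed, error handling omitted; with a folded pud, pud_offset() from
 * <asm-generic/pgtable-nopud.h> simply returns the pgd pointer):
 *
 *      pgd_t *pgd = pgd_offset(mm, addr);
 *      pud_t *pud = pud_offset(pgd, addr);
 *      pmd_t *pmd = pmd_offset(pud, addr);
 *      pte_t *pte;
 *
 *      if (!pmd_none(*pmd) && !pmd_bad(*pmd))
 *              pte = pte_offset_kernel(pmd, addr);
 */
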
/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);

/*
 * Non-present pages:  high 40 bits are offset, next 8 bits type,
 * low 16 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
        pte_t pte;

        pte_val(pte) = (type << 16) | (offset << 24);
        return pte;
}

#define __swp_type(x)           (((x).val >> 16) & 0xff)
#define __swp_offset(x)         ((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
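
/*
 * For instance, __swp_entry(1, 0x1234) encodes to the value
 * (1UL << 16) | (0x1234UL << 24), from which __swp_type() and
 * __swp_offset() recover 1 and 0x1234 respectively.
 */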

#endif /* _ASM_PGTABLE_64_H */