linux/arch/mips/include/asm/pgtable-32.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_32_H
#define _ASM_PGTABLE_32_H

#include <asm/addrspace.h>
#include <asm/page.h>

#include <linux/linkage.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#include <asm-generic/pgtable-nopmd.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
#endif

/*
 * Regarding 32-bit MIPS huge page support (and the tradeoff it entails):
 *
 *  We use the same huge page sizes as 64-bit MIPS. Assuming a 4KB page size,
 * our 2-level table layout would normally have a PGD entry cover a contiguous
 * 4MB virtual address region (pointing to a 4KB PTE page of 1,024 32-bit pte_t
 * entries, each mapping a 4KB physical page). The problem is that mapping 4MB,
 * spanning both halves of a TLB EntryLo0,1 pair, would require 2MB hardware
 * page support, which is not one of the standard supported sizes
 * (1MB, 4MB, 16MB, ...).
 *  To correct for this, when huge pages are enabled, we halve the number of
 * entries a PTE page holds, letting its last half go to waste. Correspondingly,
 * we double the number of PGD pages. Overall, page table memory overhead
 * increases to match 64-bit MIPS, but PTE lookups remain CPU cache-friendly.
 *
 * NOTE: We don't yet support huge pages if extended addressing is enabled
 *       (i.e. EVA, XPA, 36-bit Alchemy/Netlogic).
 */
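
/*
 * Worked example of the tradeoff above (a sketch, assuming the common 4KB
 * page configuration, i.e. PAGE_SHIFT == 12, PTE_ORDER == 0, PTE_T_LOG2 == 2):
 *
 *   Without huge pages: PGDIR_SHIFT = 2*12 + 0 - 2 = 22, so each PGD entry
 *   covers 4MB and a PTE page holds 4KB / 4 bytes = 1,024 entries.
 *
 *   With huge pages:    PGDIR_SHIFT = 2*12 + 0 - 2 - 1 = 21, so each PGD
 *   entry covers 2MB, only the first 512 entries of each PTE page are used,
 *   and __PGD_ORDER grows by one so the PGD still covers the full 4GB space.
 *   A 2MB huge page then maps as a 1MB + 1MB EntryLo0/EntryLo1 pair, and 1MB
 *   is a page size the hardware actually supports.
 */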

extern int temp_tlb_entry;

/*
 * - add_temporary_entry() adds a temporary TLB entry. We use TLB entries
 *      starting at the top and working down. This is for populating the
 *      TLB before trap_init() puts the TLB miss handler in place. It
 *      should be used only for entries matching the actual page tables,
 *      to prevent inconsistencies.
 */
extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
                               unsigned long entryhi, unsigned long pagemask);
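
/*
 * Illustrative use (a sketch only; entrylo0, entrylo1 and vaddr below are
 * hypothetical, board-specific values): wire one even/odd 4KB page pair at a
 * kernel virtual address before the TLB refill handler exists. With 4KB
 * pages, EntryHi is the address of the pair (low 13 bits clear) and a
 * PageMask of 0 selects the base 4KB page size:
 *
 *     add_temporary_entry(entrylo0, entrylo1, vaddr & ~0x1fffUL, 0);
 */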

/*
 * Basically we have the same two-level page tables as the i386 (the logical
 * three-level Linux page table layout folded down to two levels).  Some day,
 * when we have proper page coloring support, we can have a slightly quicker
 * TLB refill handling mechanism; for now it is a bit slower, but it works
 * even with the cache aliasing problem the R4k and above have.
 */

/* PGDIR_SHIFT determines what a top-level page table entry can map */
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
# define PGDIR_SHIFT    (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2 - 1)
#else
# define PGDIR_SHIFT    (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
#endif

#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
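
/*
 * How a 32-bit virtual address is decoded by the two-level walk (a sketch,
 * assuming 4KB pages without huge page support, i.e. PGDIR_SHIFT == 22):
 *
 *    31          22 21          12 11           0
 *   +--------------+--------------+--------------+
 *   |  PGD index   |  PTE index   | page offset  |
 *   +--------------+--------------+--------------+
 */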

/*
 * Entries per page directory level: we use two-level, so
 * we don't really have any PUD/PMD directory physically.
 */
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
# define __PGD_ORDER    (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1)
#else
# define __PGD_ORDER    (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
#endif

#define PGD_ORDER       (__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
#define PUD_ORDER       aieeee_attempt_to_allocate_pud
#define PMD_ORDER       aieeee_attempt_to_allocate_pmd
#define PTE_ORDER       0

#define PTRS_PER_PGD    (USER_PTRS_PER_PGD * 2)
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
# define PTRS_PER_PTE   ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t) / 2)
#else
# define PTRS_PER_PTE   ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
#endif

#define USER_PTRS_PER_PGD       (0x80000000UL/PGDIR_SIZE)
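
/*
 * Sanity check of the geometry (a sketch, again assuming 4KB pages and no
 * huge pages, so PGDIR_SIZE == 4MB and sizeof(pte_t) == 4):
 *
 *   USER_PTRS_PER_PGD = 0x80000000 / 4MB = 512   (user space, 0..2GB)
 *   PTRS_PER_PGD      = 512 * 2          = 1024  (full 4GB address space)
 *   PTRS_PER_PTE      = 4096 / 4         = 1024
 *
 *   1024 PGD entries * 1024 PTEs * 4KB = 4GB, as required.
 */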

#define VMALLOC_START     MAP_BASE

#define PKMAP_END       ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
#define PKMAP_BASE      (PKMAP_END - PAGE_SIZE * LAST_PKMAP)

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END    (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END    (FIXADDR_START-2*PAGE_SIZE)
#endif
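
/*
 * Resulting upper-address layout (a sketch; LAST_PKMAP == 1024 is an
 * assumption that gives a 4MB pkmap window with 4KB pages):
 *
 *   PKMAP_END   = FIXADDR_START rounded down to a 4MB boundary
 *   PKMAP_BASE  = PKMAP_END - 4MB
 *   VMALLOC_END = two pages below PKMAP_BASE (highmem) or FIXADDR_START,
 *                 leaving a small unmapped hole above the vmalloc area.
 */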

#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#else
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#endif
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void load_pgd(unsigned long pg_dir);

extern pte_t invalid_pte_table[PTRS_PER_PTE];

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /* pmd_huge(pmd) but inline */
        if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
                return 0;
#endif

        if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
                return 1;

        return 0;
}

static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
        pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

#if defined(CONFIG_XPA)

#define MAX_POSSIBLE_PHYSMEM_BITS 40
#define pte_pfn(x)              (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
{
        pte_t pte;

        pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
                                (pgprot_val(prot) & ~_PFNX_MASK);
        pte.pte_high = (pfn << _PFN_SHIFT) |
                                (pgprot_val(prot) & ~_PFN_MASK);
        return pte;
}

#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#define MAX_POSSIBLE_PHYSMEM_BITS 36
#define pte_pfn(x)              ((unsigned long)((x).pte_high >> 6))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
        pte_t pte;

        pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
        pte.pte_low = pgprot_val(prot);

        return pte;
}

#else

#define MAX_POSSIBLE_PHYSMEM_BITS 32
#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)              ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)      __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)              ((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)      __pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)      __pmd(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
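
/*
 * Worked example for the plain 32-bit case above (a sketch; the exact value
 * of _PFN_SHIFT depends on the configured page-attribute layout):
 *
 *   physical address 0x12345000 with 4KB pages  ->  pfn = 0x12345
 *   pfn_pte(0x12345, PAGE_KERNEL) packs the pfn at _PFN_SHIFT and ORs in the
 *   protection bits; pte_pfn() recovers 0x12345 by shifting back down.
 */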

#define pte_page(x)             pfn_to_page(pte_pfn(x))

#if defined(CONFIG_CPU_R3K_TLB)

/* Swap entries must have VALID bit cleared. */
#define __swp_type(x)                   (((x).val >> 10) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 15)
#define __swp_entry(type,offset)        ((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })

#else

#if defined(CONFIG_XPA)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x)                   (((x).val >> 4) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 9)
#define __swp_entry(type,offset)        ((swp_entry_t)  { ((type) << 4) | ((offset) << 9) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x)           ((pte_t) { 0, (x).val })

#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x)                   (((x).val >> 2) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 7)
#define __swp_entry(type, offset)       ((swp_entry_t)  { ((type) << 2) | ((offset) << 7) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x)           ((pte_t) { 0, (x).val })

#else
/*
 * Constraints:
 *      _PAGE_PRESENT at bit 0
 *      _PAGE_MODIFIED at bit 4
 *      _PAGE_GLOBAL at bit 6
 *      _PAGE_VALID at bit 7
 */
#define __swp_type(x)                   (((x).val >> 8) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 13)
#define __swp_entry(type,offset)        ((swp_entry_t)  { ((type) << 8) | ((offset) << 13) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })

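/*
 * Worked example for the default R4k-style encoding above: a swap entry of
 * type 1 at offset 0x100 becomes (1 << 8) | (0x100 << 13) = 0x00200100.
 * Bits 0-7 stay clear, so the entry can never look PRESENT or VALID to the
 * hardware or to the page table walkers.
 */
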
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#endif /* defined(CONFIG_CPU_R3K_TLB) */

#endif /* _ASM_PGTABLE_32_H */