linux/arch/metag/include/asm/pgtable.h
/*
 * Macros and functions to manipulate Meta page tables.
 */

#ifndef _METAG_PGTABLE_H
#define _METAG_PGTABLE_H

#include <asm/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>

/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
#if PAGE_OFFSET >= LINGLOBAL_BASE
#define CONSISTENT_START        0xF7000000
#define CONSISTENT_END          0xF73FFFFF
#define VMALLOC_START           0xF8000000
#define VMALLOC_END             0xFFFEFFFF
#else
#define CONSISTENT_START        0x77000000
#define CONSISTENT_END          0x773FFFFF
#define VMALLOC_START           0x78000000
#define VMALLOC_END             0x7FFFFFFF
#endif
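
/*
 * For reference, the sizes these constants imply: the consistent (DMA)
 * region is 0x00400000 bytes (4MB) in both layouts, while the vmalloc
 * region is 0x08000000 bytes (128MB) in the local layout but
 * 0x07FF0000 bytes (128MB minus 64kB) in the global one, since the top
 * 64kB (0xFFFF0000-0xFFFFFFFF) falls in an invalid region noted above.
 */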

/*
 * The Linux memory management assumes a three-level page table setup. On
 * Meta, we use that, but "fold" the mid level into the top-level page
 * table.
 */

/* PGDIR_SHIFT determines the size of the area a second-level page table can
 * map. This is always 4MB.
 */

#define PGDIR_SHIFT     22
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

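/*
 * For reference, with PGDIR_SHIFT = 22 these work out to
 * PGDIR_SIZE = 0x00400000 (4MB mapped per pgd entry) and
 * PGDIR_MASK = 0xffc00000.
 */
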
/*
 * Entries per page directory level: we use a two-level setup, so we
 * don't really have any PMD directory physically. First-level tables
 * always map 2GB (local or global) at a granularity of 4MB; second-level
 * tables map 4MB at a granularity between 4MB and 4kB (between 1 and
 * 1024 entries).
 */
#define PTRS_PER_PTE    (PGDIR_SIZE/PAGE_SIZE)
#define HPTRS_PER_PTE   (PGDIR_SIZE/HPAGE_SIZE)
#define PTRS_PER_PGD    512

#define USER_PTRS_PER_PGD       256
#define FIRST_USER_ADDRESS      META_MEMORY_BASE
#define FIRST_USER_PGD_NR       pgd_index(FIRST_USER_ADDRESS)

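/*
 * For reference, with 4kB pages (PAGE_SHIFT = 12) PTRS_PER_PTE is
 * 0x400000 / 0x1000 = 1024 entries per second-level table, and the 512
 * pgd entries cover 512 * 4MB = 2GB, of which the bottom 256 entries
 * (1GB) are available to user space.
 */
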
#define PAGE_NONE       __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
                                 _PAGE_CACHEABLE)

#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
                                 _PAGE_ACCESSED | _PAGE_CACHEABLE)
#define PAGE_SHARED_C   PAGE_SHARED
#define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
                                 _PAGE_CACHEABLE)
#define PAGE_COPY_C     PAGE_COPY

#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
                                 _PAGE_CACHEABLE)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
                                 _PAGE_ACCESSED | _PAGE_WRITE | \
                                 _PAGE_CACHEABLE | _PAGE_KERNEL)

#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY_C
#define __P111  PAGE_COPY_C

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED_C
#define __S111  PAGE_SHARED_C

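/*
 * Note on the tables above: the generic mm code copies __Pxwr/__Sxwr
 * into protection_map[], indexed by a vma's VM_EXEC, VM_WRITE and
 * VM_READ bits plus a shared/private bit. A private read+write mapping
 * (__P011) therefore gets PAGE_COPY, so writes fault and trigger
 * copy-on-write, while a shared read+write mapping (__S011) gets
 * PAGE_SHARED with _PAGE_WRITE set.
 */
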
#ifndef __ASSEMBLY__

#include <asm/page.h>

/* zero page used for uninitialized data */
extern unsigned long empty_zero_page;
#define ZERO_PAGE(vaddr)        (virt_to_page(empty_zero_page))

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

#define pte_pfn(pte)            (pte_val(pte) >> PAGE_SHIFT)

#define pfn_pte(pfn, prot)      __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

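/*
 * A minimal sketch (not part of the original file) of how pfn_pte()
 * and pte_pfn() round-trip; the protection bits live entirely below
 * PAGE_SHIFT, so they never disturb the pfn:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *	BUG_ON(pte_pfn(pte) != pfn);
 */
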
#define pte_none(x)             (!pte_val(x))
#define pte_present(x)          (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, xp) do { pte_val(*(xp)) = 0; } while (0)

#define pmd_none(x)             (!pmd_val(x))
#define pmd_bad(x)              ((pmd_val(x) & ~(PAGE_MASK | _PAGE_SZ_MASK)) \
                                        != (_PAGE_TABLE & ~_PAGE_SZ_MASK))
#define pmd_present(x)          (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)           do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(x)             pfn_to_page(pte_pfn(x))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */

static inline int pte_write(pte_t pte)   { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)   { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)   { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte) { return 0; }

static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= (~_PAGE_WRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte)   { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)     { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)   { pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)   { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)   { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
static inline pte_t pte_mkhuge(pte_t pte)    { return pte; }

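/*
 * Usage sketch (illustrative, not from the original file): the helpers
 * above take and return a pte by value, so they compose. For example,
 * making an entry young, dirty and writable before installing it:
 *
 *	pte = pte_mkwrite(pte_mkdirty(pte_mkyoung(pte)));
 *	set_pte_at(mm, addr, ptep, pte);
 */
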
/*
 * Macros to make a page protection uncacheable or write-combining.
 */
#define pgprot_writecombine(prot)                                       \
        __pgprot(pgprot_val(prot) & ~(_PAGE_CACHE_CTRL1 | _PAGE_CACHE_CTRL0))

#define pgprot_noncached(prot)                                          \
        __pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE)

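/*
 * A minimal sketch (illustrative only), assuming a valid pfn for a
 * device buffer that must bypass the cache:
 *
 *	pte_t pte = pfn_pte(pfn, pgprot_noncached(PAGE_KERNEL));
 */
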
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
        return pte;
}

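/*
 * Usage sketch for pte_modify() (illustrative only): protection-change
 * paths such as mprotect() swap in new protection bits while
 * _PAGE_CHG_MASK keeps the pfn and, typically, the accessed/dirty
 * state:
 *
 *	pte = pte_modify(pte, vm_get_page_prot(vma->vm_flags));
 */
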
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        unsigned long paddr = pmd_val(pmd) & PAGE_MASK;
        if (!paddr)
                return 0;
        return (unsigned long)__va(paddr);
}

#define pmd_page(pmd)           (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#define pmd_page_shift(pmd)     (12 + ((pmd_val(pmd) & _PAGE_SZ_MASK) \
                                        >> _PAGE_SZ_SHIFT))
#define pmd_num_ptrs(pmd)       (PGDIR_SIZE >> pmd_page_shift(pmd))

/*
 * Each pgd is only 2kB, mapping 2GB (local or global). If we're in global
 * space drop the top bit before indexing the pgd.
 */
#if PAGE_OFFSET >= LINGLOBAL_BASE
#define pgd_index(address)      ((((address) & ~0x80000000) >> PGDIR_SHIFT) \
                                                        & (PTRS_PER_PGD-1))
#else
#define pgd_index(address)      (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#endif
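
/*
 * Worked example: in the local layout, address 0x78400000 gives
 * (0x78400000 >> 22) & 511 = 0x1e1, i.e. entry 481 of the 512-entry
 * pgd.
 */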

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

#define pgd_offset_k(address)   pgd_offset(&init_mm, address)

#define pmd_index(address)      (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* Find an entry in the second-level page table. */
#if !defined(CONFIG_HUGETLB_PAGE)
  /* all pages are of size (1 << PAGE_SHIFT), so no need to read 1st level pt */
# define pte_index(pmd, address) \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#else
  /* some pages are huge, so read 1st level pt to find out */
# define pte_index(pmd, address) \
        (((address) >> pmd_page_shift(pmd)) & (pmd_num_ptrs(pmd) - 1))
#endif
#define pte_offset_kernel(dir, address) \
        ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(*(dir), address))
#define pte_offset_map(dir, address)            pte_offset_kernel(dir, address)
#define pte_offset_map_nested(dir, address)     pte_offset_kernel(dir, address)

#define pte_unmap(pte)          do { } while (0)
#define pte_unmap_nested(pte)   do { } while (0)

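/*
 * A minimal walk sketch (not from the original file), assuming a
 * mapped address. The pud and pmd levels are folded, so pud_offset()
 * and pmd_offset() just cast the entry down a level:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
 *		pte_t *pte = pte_offset_kernel(pmd, addr);
 *		if (pte_present(*pte))
 *			pr_info("pfn %lx\n", pte_pfn(*pte));
 *	}
 */
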
#define pte_ERROR(e) \
        pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Meta doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
                                    unsigned long address, pte_t *pte)
{
}

/*
 * Encode and decode a swap entry (must be !pte_none(e) && !pte_present(e)).
 * Since _PAGE_PRESENT is the bottom bit, we can use the bits above it.
 */
#define __swp_type(x)                   (((x).val >> 1) & 0xff)
#define __swp_offset(x)                 ((x).val >> 10)
#define __swp_entry(type, offset)       ((swp_entry_t) { ((type) << 1) | \
                                         ((offset) << 10) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })

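/*
 * Round-trip sketch (illustrative only) for the arch-level encoding
 * above: type occupies bits 1-8 (so type < 256) and offset bits 10 and
 * up, leaving bit 0 clear, so the resulting pte is !pte_present() but
 * !pte_none():
 *
 *	swp_entry_t swp = __swp_entry(type, offset);
 *	pte_t pte = __swp_entry_to_pte(swp);
 *	BUG_ON(__swp_type(__pte_to_swp_entry(pte)) != type);
 *	BUG_ON(__swp_offset(__pte_to_swp_entry(pte)) != offset);
 */
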
#define kern_addr_valid(addr)   (1)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()    do { } while (0)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
void paging_init(unsigned long mem_end);

#ifdef CONFIG_METAG_META12
/* This is a workaround for an issue in Meta 1 cores. These cores cache
 * invalid entries in the TLB, so we always need to flush whenever we add
 * a new pte. Unfortunately we can only flush the whole TLB, not shoot
 * down single entries, so this is sub-optimal. This implementation
 * ensures that we will get a flush at the second attempt, so we may
 * still get repeated faults, but we don't overflow the kernel stack
 * handling them.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({                                                                        \
        int __changed = !pte_same(*(__ptep), __entry);                    \
        if (__changed) {                                                  \
                set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
        }                                                                 \
        flush_tlb_page(__vma, __address);                                 \
        __changed;                                                        \
})
#endif
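
/*
 * Usage note (one reading of the workaround above, not from the
 * original file): the generic fault path calls ptep_set_access_flags()
 * when it updates a present pte, e.g. to set the dirty or accessed
 * bits. The definition above flushes the TLB even when the pte is
 * unchanged, which evicts any stale invalid entry a Meta 1 core may
 * have cached, so a repeated fault on the same address eventually
 * makes progress.
 */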

#include <asm-generic/pgtable.h>

#endif /* __ASSEMBLY__ */
#endif /* _METAG_PGTABLE_H */