linux/arch/m32r/include/asm/pgtable.h
#ifndef _ASM_M32R_PGTABLE_H
#define _ASM_M32R_PGTABLE_H

#include <asm-generic/4level-fixup.h>

#ifdef __KERNEL__
/*
 * The Linux memory management assumes a three-level page table setup. On
 * the M32R, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * M32R mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the M32R page table tree.
 */
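/*
 * A sketch of how the fold works (assuming the definitions in
 * <asm/pgtable-2level.h>, where the mid level has a single entry):
 * pmd_offset() just reinterprets the pgd entry in place, so a virtual
 * address splits into a pgd index and a pte index only, leaving the
 * two physical tables the mmu expects.
 */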

/* CAUTION: if you change macro definitions in this file, you may have
 * to change arch/m32r/mmu.S manually to match.
 */

#ifndef __ASSEMBLY__

#include <linux/threads.h>
#include <linux/bitops.h>
#include <asm/processor.h>
#include <asm/addrspace.h>
#include <asm/page.h>

struct mm_struct;
struct vm_area_struct;

extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr)        (virt_to_page(empty_zero_page))
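/*
 * Usage note: the generic fault path can map ZERO_PAGE(addr) read-only
 * into an anonymous mapping on a read fault, so untouched anonymous
 * memory reads back as zeroes without allocating a real page.
 */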

#endif /* !__ASSEMBLY__ */

#ifndef __ASSEMBLY__
#include <asm/pgtable-2level.h>
#endif

#define pgtable_cache_init()    do { } while (0)

#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE - 1))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE - 1))

#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS      0
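/*
 * Worked example (a sketch, assuming the two-level values from
 * <asm/pgtable-2level.h>: PAGE_SHIFT == 12, PGDIR_SHIFT == 22):
 *
 *        PGDIR_SIZE == 1UL << 22 == 4MB    (span of one pgd entry)
 *        PGDIR_MASK == 0xffc00000
 *        USER_PTRS_PER_PGD == TASK_SIZE / 4MB    (pgd slots for user space)
 */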

#ifndef __ASSEMBLY__
/* Just an arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_START           KSEG2
#define VMALLOC_END             KSEG3

/*
 *     M32R TLB format
 *
 *     [0]    [1:19]           [20:23]       [24:31]
 *     +-----------------------+----+-------------+
 *     |          VPN          |0000|    ASID     |
 *     +-----------------------+----+-------------+
 *     +-+---------------------+----+-+---+-+-+-+-+
 *     |0         PPN          |0000|N|AC |L|G|V| |
 *     +-+---------------------+----+-+---+-+-+-+-+
 *                                     RWX
 */
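/*
 * Note: the diagram above numbers bits MSB-first ([0] is the top bit),
 * while the _PAGE_BIT_* values below are LSB-first shift counts; e.g.
 * V (valid) corresponds to _PAGE_BIT_PRESENT == 1 and the AC (RWX
 * access) field to _PAGE_BIT_READ/WRITE/EXEC == 6/5/4.
 */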

#define _PAGE_BIT_DIRTY         0       /* software: page changed */
#define _PAGE_BIT_FILE          0       /* when !present: nonlinear file
                                           mapping */
#define _PAGE_BIT_PRESENT       1       /* Valid: page is valid */
#define _PAGE_BIT_GLOBAL        2       /* Global */
#define _PAGE_BIT_LARGE         3       /* Large */
#define _PAGE_BIT_EXEC          4       /* Execute */
#define _PAGE_BIT_WRITE         5       /* Write */
#define _PAGE_BIT_READ          6       /* Read */
#define _PAGE_BIT_NONCACHABLE   7       /* Non cachable */
#define _PAGE_BIT_ACCESSED      8       /* software: page referenced */
#define _PAGE_BIT_PROTNONE      9       /* software: if not present */

#define _PAGE_DIRTY             (1UL << _PAGE_BIT_DIRTY)
#define _PAGE_FILE              (1UL << _PAGE_BIT_FILE)
#define _PAGE_PRESENT           (1UL << _PAGE_BIT_PRESENT)
#define _PAGE_GLOBAL            (1UL << _PAGE_BIT_GLOBAL)
#define _PAGE_LARGE             (1UL << _PAGE_BIT_LARGE)
#define _PAGE_EXEC              (1UL << _PAGE_BIT_EXEC)
#define _PAGE_WRITE             (1UL << _PAGE_BIT_WRITE)
#define _PAGE_READ              (1UL << _PAGE_BIT_READ)
#define _PAGE_NONCACHABLE       (1UL << _PAGE_BIT_NONCACHABLE)
#define _PAGE_ACCESSED          (1UL << _PAGE_BIT_ACCESSED)
#define _PAGE_PROTNONE          (1UL << _PAGE_BIT_PROTNONE)

#define _PAGE_TABLE     \
        ( _PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED \
        | _PAGE_DIRTY )
#define _KERNPG_TABLE   \
        ( _PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED \
        | _PAGE_DIRTY )
#define _PAGE_CHG_MASK  \
        ( PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY )
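/*
 * _PAGE_CHG_MASK is the set of bits pte_modify() below preserves when
 * a mapping's protection changes: the physical frame (PTE_MASK) plus
 * the software accessed/dirty state; every other bit is taken from the
 * new pgprot.
 */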

#ifdef CONFIG_MMU
#define PAGE_NONE       \
        __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED     \
        __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_SHARED_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_WRITE | _PAGE_READ \
                | _PAGE_ACCESSED)
#define PAGE_COPY       \
        __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_COPY_EXEC  \
        __pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_READONLY   \
        __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_READONLY_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_ACCESSED)

#define __PAGE_KERNEL   \
        ( _PAGE_PRESENT | _PAGE_EXEC | _PAGE_WRITE | _PAGE_READ | _PAGE_DIRTY \
        | _PAGE_ACCESSED )
#define __PAGE_KERNEL_RO        ( __PAGE_KERNEL & ~_PAGE_WRITE )
#define __PAGE_KERNEL_NOCACHE   ( __PAGE_KERNEL | _PAGE_NONCACHABLE)

#define MAKE_GLOBAL(x)  __pgprot((x) | _PAGE_GLOBAL)

#define PAGE_KERNEL             MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO          MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE     MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
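/*
 * Kernel mappings are made _PAGE_GLOBAL so that, per the TLB format
 * above, they match in every address space regardless of ASID and need
 * not be flushed on context switch.
 */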

#else
#define PAGE_NONE               __pgprot(0)
#define PAGE_SHARED             __pgprot(0)
#define PAGE_SHARED_EXEC        __pgprot(0)
#define PAGE_COPY               __pgprot(0)
#define PAGE_COPY_EXEC          __pgprot(0)
#define PAGE_READONLY           __pgprot(0)
#define PAGE_READONLY_EXEC      __pgprot(0)

#define PAGE_KERNEL             __pgprot(0)
#define PAGE_KERNEL_RO          __pgprot(0)
#define PAGE_KERNEL_NOCACHE     __pgprot(0)
#endif /* CONFIG_MMU */

        /* xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC
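/*
 * Example: a PROT_READ|PROT_WRITE, MAP_PRIVATE mmap() indexes __P011,
 * i.e. PAGE_COPY: readable but without the hardware write bit, so the
 * first write faults and copy-on-write can supply a private page.
 */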
#define pte_present(x)  (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
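/*
 * _PAGE_PROTNONE counts as "present" here because a PROT_NONE mapping
 * (PAGE_NONE above) is still resident; it merely carries no hardware
 * access bits, and only this software bit distinguishes it from a
 * swapped-out or empty pte.
 */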
#define pte_clear(mm,addr,xp)   do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pmd_none(x)     (!pmd_val(x))
#define pmd_present(x)  (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)   do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x)      ((pmd_val(x) & ~PAGE_MASK) != _KERNPG_TABLE)

#define pages_to_mb(x)  ((x) >> (20 - PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
        return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
        return pte_val(pte) & _PAGE_WRITE;
}

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
        return pte_val(pte) & _PAGE_FILE;
}

static inline int pte_special(pte_t pte)
{
        return 0;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_DIRTY;
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_ACCESSED;
        return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_WRITE;
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_DIRTY;
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= _PAGE_ACCESSED;
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= _PAGE_WRITE;
        return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return pte;
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        clear_bit(_PAGE_BIT_WRITE, ptep);
}
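/*
 * Both helpers above poke the pte word with atomic bitops
 * (test_and_clear_bit()/clear_bit()) rather than a plain
 * read-modify-write, so a racing update of the other bits in the same
 * pte is not lost.
 */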

/*
 * Macro and implementation to mark a page protection as uncachable.
 */
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

        prot |= _PAGE_NONCACHABLE;
        return __pgprot(prot);
}

#define pgprot_writecombine(prot) pgprot_noncached(prot)
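/*
 * Typical use (a sketch): a driver marking an mmap()ed MMIO region
 * uncachable before remapping it:
 *
 *        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *        return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *                                  vma->vm_end - vma->vm_start,
 *                                  vma->vm_page_prot);
 */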

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), pgprot)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK)
                | pgprot_val(newprot)));

        return pte;
}
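/*
 * Usage note: the mprotect() walk changes each pte roughly as
 *
 *        old = ptep_get_and_clear(mm, addr, ptep);
 *        set_pte_at(mm, addr, ptep, pte_modify(old, newprot));
 *
 * keeping only the _PAGE_CHG_MASK bits (pfn, accessed, dirty) of the
 * old entry.
 */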

/*
 * Link a pmd entry to the pte table it points at, and map a pmd entry
 * back to the virtual address of that table.
 */
static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{
        pmd_val(*pmdp) = (((unsigned long) ptep) & PAGE_MASK);
}

#define pmd_page_vaddr(pmd)     \
        ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd)   (mem_map + ((pmd_val(pmd) >> PAGE_SHIFT) - PFN_BASE))
#endif /* !CONFIG_DISCONTIGMEM */

/* to find an entry in a page-table-directory. */
#define pgd_index(address)      \
        (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)   pgd_offset(&init_mm, address)

#define pmd_index(address)      \
        (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pte_index(address)      \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
        ((pte_t *)pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address)    \
        ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address)     pte_offset_map(dir, address)
#define pte_unmap(pte)          do { } while (0)
#define pte_unmap_nested(pte)   do { } while (0)
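/*
 * Example walk (a sketch; pmd_offset() comes from the folded mid level
 * in <asm/pgtable-2level.h>): resolving the pte of a kernel address.
 *
 *        pgd_t *pgd = pgd_offset_k(addr);
 *        pmd_t *pmd = pmd_offset(pgd, addr);
 *        pte_t *pte;
 *
 *        if (!pmd_none(*pmd) && !pmd_bad(*pmd))
 *                pte = pte_offset_kernel(pmd, addr);
 */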

/* Encode and de-code a swap entry */
#define __swp_type(x)                   (((x).val >> 2) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 10)
#define __swp_entry(type, offset)       \
        ((swp_entry_t) { ((type) << 2) | ((offset) << 10) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })
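/*
 * Layout (a sketch): bits 0 (_PAGE_FILE) and 1 (_PAGE_PRESENT) stay
 * clear, as does bit 9 (_PAGE_PROTNONE), so a swap pte never looks
 * present; the swap type occupies bits 2..6 (5 bits, up to 32 swap
 * areas) and the swap offset starts at bit 10.
 */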

#endif /* !__ASSEMBLY__ */

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)   (1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
                remap_pfn_range(vma, vaddr, pfn, size, prot)

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_PGTABLE_H */